//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int>
    DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
                     cl::ZeroOrMore,
                     cl::desc("Default amount of inlining to perform"));

static cl::opt<bool> PrintInstructionComments(
    "print-instruction-comments", cl::Hidden, cl::init(false),
    cl::desc("Prints comments for instructions based on inline cost analysis"));

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as
// BPI and BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));

static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));

static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

static cl::opt<bool> InlineCallerSupersetNoBuiltin(
    "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
    cl::ZeroOrMore,
    cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
             "attributes."));

static cl::opt<bool> DisableGEPConstOperand(
    "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
    cl::desc("Disables evaluation of GetElementPtr with constant operands"));

namespace {
class InlineCostCallAnalyzer;

// This struct is used to store information about the inline cost of a
// particular instruction.
struct InstructionCostDetail {
  int CostBefore = 0;
  int CostAfter = 0;
  int ThresholdBefore = 0;
  int ThresholdAfter = 0;

  int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }

  int getCostDelta() const { return CostAfter - CostBefore; }

  bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};

class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
  InlineCostCallAnalyzer *const ICCA;

public:
  InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) override;
};

/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as an implementation detail of functions
/// such as llvm::getInlineCost. Note the function_ref constructor parameters -
/// the expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}
  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  function_ref<BlockFrequencyInfo &(Function &)> GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;

  /// Extension points for handling callsite features.
  /// Called after a basic block was analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called before an instruction is analyzed.
  virtual void onInstructionAnalysisStart(const Instruction *I) {}

  /// Called after an instruction was analyzed.
  virtual void onInstructionAnalysisFinish(const Instruction *I) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
  /// the reason it can't.
  virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
  /// Called when we're about to start processing a basic block, and every time
  /// we are done processing an instruction. Return true if there is no point
  /// in continuing the analysis (e.g. we've already determined the call site
  /// is too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return a
  /// reason the analysis can't continue if that's the case, or 'true' if it
  /// may continue.
  virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't happen.
  virtual void onDisableLoadElimination() {}

  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation that the inlining would result in
  /// a load elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
  virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
  }

  /// Account for a jump table of given size. Return false to stop further
  /// processing of the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of given size. Return false to stop further
  /// processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onMissedSimplification() {}

  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}

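  /// If \p V maps back to a caller alloca that is still an SROA candidate,
  /// either accrue the expected SROA savings for this use (when
  /// \p DoNotDisable is true) or conservatively disable SROA for that alloca.
  /// Returns true only if savings were accrued.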
  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }

  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  DenseSet<AllocaInst *> EnabledSROAAllocas;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise cause them to
  /// be loads.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;

  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
      return nullptr;
    return It->second;
  }

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at
  /// \p Call.
  bool allowSizeGrowth(CallBase &Call);

  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(
      Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();

  Optional<Constant *> getSimplifiedValue(Instruction *I) {
    if (SimplifiedValues.find(I) != SimplifiedValues.end())
      return SimplifiedValues[I];
    return None;
  }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;

  void dump();
};

/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer.
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;
  /// Bonus to be applied when the percentage of vector instructions in the
  /// callee is high (see more details in updateThreshold).
  int VectorBonus = 0;
  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  // This DenseMap stores the delta change in cost and threshold after
  // accounting for the given instruction. The map is filled only with the
  // flag PrintInstructionComments on.
  DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;

  /// Upper bound for the inlining cost. Bonuses are being applied to account
  /// for speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost their inline cost.
  const bool BoostIndirectCalls;

  /// Ignore the threshold when finalizing analysis.
  const bool IgnoreThreshold;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  bool SingleBB = true;

  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;

  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);
  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);

  /// Handle a capped 'int' increment for Cost.
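  /// Note that \p Inc may be negative (for example, the callsite setup
  /// savings credited in onAnalysisStart); the \p UpperBound cap only
  /// constrains positive growth.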
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = (int)std::min(UpperBound, Cost + Inc);
  }

  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }
  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }
  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
    addCost(3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for the average 1 instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan out.
    // Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false);
      if (CA.analyze().isSuccess()) {
        // We were able to inline the indirect call! Subtract the cost from the
        // threshold to get the bonus we want to apply, but don't go below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(InlineConstants::CallPenalty);
  }

  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // branch to destination.
    // Note: the large cost increments in this function are capped at
    // CostUpperBound rather than INT_MAX.
    if (JumpTableSize) {
      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                       4 * InlineConstants::InstrCost;
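      // For example, with InlineConstants::InstrCost at its usual value of 5,
      // a 16-entry jump table is modeled as (16 + 4) * 5 = 100 units: one
      // instruction per table entry plus a small fixed overhead.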

      addCost(JTCost, (int64_t)CostUpperBound);
      return;
    }
    // When lowered as a binary search, the number of comparisons equals the
    // number of nodes in the search tree. For a given number of clusters, n,
    // we can define a recursive function, f(n), to find the number of nodes:
    // f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
    // and f(n) = n, when n <= 3.
    // This leads to a binary tree where each leaf is either f(2) or f(3) when
    // n > 3. So, the number of comparisons from leaves should be n, while the
    // number of non-leaf nodes should be:
    // 2^(log2(n) - 1) - 1
    // = 2^log2(n) * 2^-1 - 1
    // = n / 2 - 1.
    // Combining comparisons from leaf and non-leaf nodes, we can estimate the
    // total number of comparisons in a simple closed form:
    // n + n / 2 - 1 = n * 3 / 2 - 1
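    // As a quick check of the closed form: n = 8 gives 8 + 8/2 - 1 = 11
    // expected comparisons, so the switch below is charged 11 * 2 * InstrCost
    // (one compare plus one branch per node).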
    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional branch.
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, (int64_t)CostUpperBound);
  }
  void onMissedSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }

  void onBlockAnalyzed(const BasicBlock *BB) override {
    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  void onInstructionAnalysisStart(const Instruction *I) override {
    // This function is called to store the initial cost of inlining before
    // the given instruction was assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostBefore = Cost;
    InstructionCostDetailMap[I].ThresholdBefore = Threshold;
  }

  void onInstructionAnalysisFinish(const Instruction *I) override {
    // This function is called to find new values of cost and threshold after
    // the instruction has been assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostAfter = Cost;
    InstructionCostDetailMap[I].ThresholdAfter = Threshold;
  }

  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising for
    // size, we penalise any call sites that perform loops. We do this after all
    // other costs here, so will likely only be dealing with relatively small
    // functions (and hence DT and LI will hopefully be cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed.
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::CallPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    if (IgnoreThreshold || Cost < std::max(1, Threshold))
      return InlineResult::success();
    return InlineResult::failure("Cost over threshold.");
  }
  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
  }

  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }

  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would be
    // nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties.
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold and
    // bonuses are non-negative.
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold any time, and cost cannot decrease, we can stop processing
    // the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return InlineResult::failure("high cost");

    return InlineResult::success();
  }

public:
  InlineCostCallAnalyzer(
      Function &Callee, CallBase &Call, const InlineParams &Params,
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
      bool IgnoreThreshold = false)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        Writer(this) {}

  /// Annotation Writer for instruction details.
  InlineCostAnnotationWriter Writer;

  void dump();

  // Prints the same analysis as dump(), but its definition is not dependent
  // on the build.
  void print();

  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
      return InstructionCostDetailMap[I];
    return None;
  }

  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
};
} // namespace

/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAAllocas.erase(SROAArg);
  disableLoadElimination();
}

void InlineCostAnnotationWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {
  // The cost of inlining the given instruction is always printed. The
  // threshold delta is printed only when it is non-zero, which happens when
  // we decided to give a bonus at a particular instruction.
  Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
  if (!Record)
    OS << "; No analysis for the instruction";
  else {
    OS << "; cost before = " << Record->CostBefore
       << ", cost after = " << Record->CostAfter
       << ", threshold before = " << Record->ThresholdBefore
       << ", threshold after = " << Record->ThresholdAfter << ", ";
    OS << "cost delta = " << Record->getCostDelta();
    if (Record->hasThresholdChanged())
      OS << ", threshold delta = " << Record->getThresholdDelta();
  }
  auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
  if (C) {
    OS << ", simplified to ";
    C.getValue()->print(OS, true);
  }
  OS << "\n";
}

/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}

/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
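/// As an illustration, with 64-bit pointers a GEP such as
///   getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i64 1, i32 1
/// accumulates 16 (one struct-sized step) + 8 (the field offset) = 24 into
/// Offset.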
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (const Use &Op : GEP.indices())
    if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(Op);
  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&GEP, Operands,
                         TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      // Sometimes a dynamic alloca could be converted into a static alloca
      // after this constant propagation, and become a huge static alloca on an
      // unconditional CFG path. Avoid inlining if this is going to happen above
      // a threshold.
      // FIXME: If the threshold is removed or lowered too much, we could end up
      // being too pessimistic and prevent inlining non-problematic code. This
      // could result in unintended perf regressions. A better overall strategy
      // is needed to track stack usage during inlining.
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(),
          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
        HasDynamicAlloca = true;
        return false;
      }
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do we
  // need to use the correct address space in the call to getPointerSizeInBits
  // here? Or could we skip the getPointerSizeInBits call completely? As far as
  // I can see the ZeroOffset is used as a dummy value, so we can probably use
  // any bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
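  // For example, if every live incoming value is the same constant, the phi
  // simplifies to that constant; if all of them map to one base pointer and
  // offset, the phi inherits that base+offset pair (and any SROA candidacy of
  // the base).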
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming value.
        continue;
      // Otherwise early exit because we either see a different constant or saw
      // a constant before but we have a pointer with constant offset this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointer with constant offset here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the first time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the first time we've seen a pointer
    // with constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}

/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
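/// For example, if the pointer operand is already known to be (%alloca, 16)
/// and the GEP contributes a further constant 8 bytes, the GEP is recorded
/// as (%alloca, 24) and later modeled as free.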
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Offset.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (const Use &Op : GEP.indices())
      if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
        return false;
    return true;
  };

  if (!DisableGEPConstOperand)
    if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
          SmallVector<Constant *, 2> Indices;
          for (unsigned int Index = 1; Index < COps.size(); ++Index)
            Indices.push_back(COps[Index]);
          return ConstantExpr::getGetElementPtr(
              I.getSourceElementType(), COps[0], Indices, I.isInBounds());
        }))
      return true;

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}

/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
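/// For example, visitBitCast passes a lambda that folds the cast via
/// ConstantExpr::getBitCast once every operand is known to be, or simplifies
/// to, a constant.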
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize == DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  if (auto *SROAArg = getSROAArgForValueOrNull(Op))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't explicitly list
  // elsewhere.
  disableSROA(I.getOperand(0));

  // If this is a floating-point cast, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such.
  switch (I.getOpcode()) {
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
      onCallPenalty();
    break;
  default:
    break;
  }

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantFoldInstOperands(&I, COps[0], DL);
      }))
    return true;

  // Disable any SROA on the argument to arbitrary unary instructions.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
}

bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}

bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
  // main() {
  //   hot_call_1();
  //   ...
  //   hot_call_N()
  //   exit(0);
  // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In the future, we should elaborate on this based on BPI and BFI in
  // more general threshold adjusting heuristics in updateThreshold().
  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
    return false;

  return true;
}

bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
                                            BlockFrequencyInfo *CallerBFI) {
  // If a global profile summary is available, then the callsite's coldness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary())
    return PSI->isColdCallSite(Call, CallerBFI);

  // Otherwise we need BFI to be available.
  if (!CallerBFI)
    return false;

  // Determine if the callsite is cold relative to the caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
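  // With the default ColdCallSiteRelFreq of 2, for example, a callsite whose
  // block executes less than 2% as often as the caller's entry block is
  // considered cold.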
  const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}

Optional<int>
InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
                                                BlockFrequencyInfo *CallerBFI) {

  // If a global profile summary is available, then the callsite's hotness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
    return Params.HotCallSiteThreshold;

  // Otherwise we need BFI to be available and to have a locally hot callsite
  // threshold.
  if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
    return None;

  // Determine if the callsite is hot relative to the caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
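  // Symmetrically to the cold case: with the default HotCallSiteRelFreq of
  // 60, the callsite's block must execute at least 60 times as often as the
  // caller's entry block to earn the locally hot threshold.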
1287 auto CallSiteBB = Call.getParent();
1288 auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1289 auto CallerEntryFreq = CallerBFI->getEntryFreq();
1290 if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1291 return Params.LocallyHotCallSiteThreshold;
1292
1293 // Otherwise treat it normally.
1294 return None;
1295 }

void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(Call)) {
    Threshold = 0;
    return;
  }

  Function *Caller = Call.getCaller();

  // return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };

  // Various bonus percentages. These are multiplied by Threshold to get the
  // bonus values.
  // SingleBBBonus: This bonus is applied if the callee has a single reachable
  // basic block at the given callsite context. This is speculatively applied
  // and withdrawn if more than one basic block is seen.
  //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
  // of the last call to a static function, as inlining such functions is
  // guaranteed to reduce code size.
  //
  // These bonus percentages may be set to 0 based on properties of the caller
  // and the callsite.
  int SingleBBBonusPercent = 50;
  int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
  int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;

  // Lambda to set all the above bonus and bonus percentages to 0.
  auto DisallowAllBonuses = [&]() {
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
    LastCallToStaticBonus = 0;
  };

  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->hasMinSize()) {
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
    // For minsize, we want to disable the single BB bonus and the vector
    // bonuses, but not the last-call-to-static bonus. Inlining the last call
    // to a static function will, at the minimum, eliminate the parameter setup
    // and call/return instructions.
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
  } else if (Caller->hasOptSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on inlinehint attribute and profile based
  // hotness information if the caller does not have MinSize attribute.
  if (!Caller->hasMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);

    // FIXME: After switching to the new passmanager, simplify the logic below
    // by checking only the callsite hotness/coldness as we will reliably
    // have local profile information.
    //
    // Callsite hotness and coldness can be determined if sample profile is
    // used (which adds hotness metadata to calls) or if caller's
    // BlockFrequencyInfo is available.
    BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
    auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
    if (!Caller->hasOptSize() && HotCallSiteThreshold) {
      LLVM_DEBUG(dbgs() << "Hot callsite.\n");
      // FIXME: This should update the threshold only if it exceeds the
      // current threshold, but AutoFDO + ThinLTO currently relies on this
      // behavior to prevent inlining of hot callsites during ThinLTO
      // compile phase.
      Threshold = HotCallSiteThreshold.getValue();
    } else if (isColdCallSite(Call, CallerBFI)) {
      LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite, including the
      // LastCallToStatic bonus. While this bonus might result in code size
      // reduction, it can cause the size of a non-cold caller to increase,
      // preventing it from being inlined.
      DisallowAllBonuses();
      Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
    } else if (PSI) {
      // Use callee's global profile information only if we have no way of
      // determining this via callsite information.
      if (PSI->isFunctionEntryHot(&Callee)) {
        LLVM_DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness cannot be determined, we may still know
        // that the callee is hot and treat it as a weaker hint for threshold
        // increase.
        Threshold = MaxIfValid(Threshold, Params.HintThreshold);
      } else if (PSI->isFunctionEntryCold(&Callee)) {
        LLVM_DEBUG(dbgs() << "Cold callee.\n");
        // Do not apply bonuses for a cold callee, including the
        // LastCallToStatic bonus. While this bonus might result in code size
        // reduction, it can cause the size of a non-cold caller to increase,
        // preventing it from being inlined.
        DisallowAllBonuses();
        Threshold = MinIfValid(Threshold, Params.ColdThreshold);
      }
    }
  }

  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();

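  // For instance, starting from the default threshold of 225 with a single-BB
  // bonus of 50 percent, SingleBBBonus comes out to 225 * 50 / 100 = 112
  // (integer division).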
  SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
  VectorBonus = Threshold * VectorBonusPercent / 100;

  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically. It may seem odd to update
  // Cost in updateThreshold, but the bonus depends on the logic in this
  // method.
  if (OnlyOneCallAndLocalLinkage)
    Cost -= LastCallToStaticBonus;
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
      }))
    return true;

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
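  // For example, if %p was tracked back to an alloca %a at offset 4 and %q to
  // the same %a at offset 8, then "icmp eq i32* %p, %q" folds to false here
  // even though neither pointer is itself a constant.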
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }
  return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
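  // E.g., subtracting the ptrtoint values of two GEPs off the same base at
  // offsets 16 and 4 folds to the constant 12 without materializing either
  // pointer.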
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Constant *CLHS = dyn_cast<Constant>(LHS);
  if (!CLHS)
    CLHS = SimplifiedValues.lookup(LHS);
  Constant *CRHS = dyn_cast<Constant>(RHS);
  if (!CRHS)
    CRHS = SimplifiedValues.lookup(RHS);

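  // Try instsimplify on the operator with each operand replaced by its known
  // constant, if any. E.g., if %x simplified to 3, "add i32 %x, 4" folds to 7;
  // an identity like "add i32 %y, 0" folds to %y even with no constants.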
  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
                            FI->getFastMathFlags(), DL);
  else
    SimpleV =
        SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  // If the instruction is floating point, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such, unless it's fneg, which can be implemented with an xor.
  using namespace llvm::PatternMatch;
  if (I.getType()->isFloatingPointTy() &&
      TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
      !match(&I, m_FNeg(m_Value())))
    onCallPenalty();

  return false;
}

bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Op);
  if (!COp)
    COp = SimplifiedValues.lookup(Op);

  Value *SimpleV = SimplifyFNegInst(
      COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified fneg.
  disableSROA(Op);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // If the data is already loaded from this address and hasn't been clobbered
  // by any stores or calls, this load is likely to be redundant and can be
  // eliminated.
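  // E.g., for two unordered loads of the same address with no intervening
  // store or call, the second load counts as a savings opportunity rather
  // than at full cost.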
  if (EnableLoadElimination &&
      !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
    onLoadEliminationOpportunity();
    return true;
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // The store can potentially clobber loads and prevent repeated loads from
  // being eliminated.
  // FIXME:
  // 1. We can probably keep an initial set of eliminatable loads subtracted
  // from the cost even when we finally see a store. We just need to disable
  // *further* accumulation of elimination savings.
  // 2. We should probably at some point thread MemorySSA for the callee into
  // this and then use that to actually compute *really* precise savings.
  disableLoadElimination();
  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getExtractValue(COps[0], I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
                                            /*InsertedValueOperand*/ COps[1],
                                            I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

/// Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
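/// For example, a call such as "llvm.ctpop.i32(i32 7)" whose argument is (or
/// simplifies to) a constant folds to the constant 3 and costs nothing after
/// inlining.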
bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(&Call, F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(Call.arg_size());
  for (Value *I : Call.args()) {
    Constant *C = dyn_cast<Constant>(I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
    SimplifiedValues[&Call] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallBase(CallBase &Call) {
  if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
    ContainsNoDuplicateCall = true;

  Value *Callee = Call.getCalledOperand();
  Function *F = dyn_cast_or_null<Function>(Callee);
  bool IsIndirectCall = !F;
  if (IsIndirectCall) {
    // Check if this happens to be an indirect function call to a known
    // function in this inline context. If not, we've done all we can.
    F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
    if (!F) {
      onCallArgumentSetup(Call);

      if (!Call.onlyReadsMemory())
        disableLoadElimination();
      return Base::visitCallBase(Call);
    }
  }

  assert(F && "Expected a call to a known function");

  // When we have a concrete function, first try to simplify it directly.
  if (simplifyCallSite(F, Call))
    return true;

  // Next check if it is an intrinsic we know about.
  // FIXME: Lift this into part of the InstVisitor.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
    switch (II->getIntrinsicID()) {
    default:
      if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
        disableLoadElimination();
      return Base::visitCallBase(Call);

    case Intrinsic::load_relative:
      onLoadRelativeIntrinsic();
      return false;

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      disableLoadElimination();
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    case Intrinsic::icall_branch_funnel:
    case Intrinsic::localescape:
      HasUninlineableIntrinsic = true;
      return false;
    case Intrinsic::vastart:
      InitsVargArgs = true;
      return false;
    }
  }

  if (F == Call.getFunction()) {
    // This flag will fully abort the analysis, so don't bother with anything
    // else.
    IsRecursiveCall = true;
    return false;
  }

  if (TTI.isLoweredToCall(F)) {
    onLoweredCall(F, Call, IsIndirectCall);
  }

  if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
    disableLoadElimination();
  return Base::visitCallBase(Call);
}

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
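  // E.g., a branch on a condition that simplified to "i1 true" is free: only
  // one successor can execute, so no compare-and-branch survives inlining.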
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
  bool CheckSROA = SI.getType()->isPointerTy();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  Constant *TrueC = dyn_cast<Constant>(TrueVal);
  if (!TrueC)
    TrueC = SimplifiedValues.lookup(TrueVal);
  Constant *FalseC = dyn_cast<Constant>(FalseVal);
  if (!FalseC)
    FalseC = SimplifiedValues.lookup(FalseVal);
  Constant *CondC =
      dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));

  if (!CondC) {
    // Select C, X, X => X
    if (TrueC == FalseC && TrueC) {
      SimplifiedValues[&SI] = TrueC;
      return true;
    }

    if (!CheckSROA)
      return Base::visitSelectInst(SI);

    std::pair<Value *, APInt> TrueBaseAndOffset =
        ConstantOffsetPtrs.lookup(TrueVal);
    std::pair<Value *, APInt> FalseBaseAndOffset =
        ConstantOffsetPtrs.lookup(FalseVal);
    if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
      ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;

      if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
        SROAArgValues[&SI] = SROAArg;
      return true;
    }

    return Base::visitSelectInst(SI);
  }

  // Select condition is a constant.
  Value *SelectedV = CondC->isAllOnesValue()
                         ? TrueVal
                         : (CondC->isNullValue()) ? FalseVal : nullptr;
  if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle cases such
    // as vector selects.
    if (TrueC && FalseC) {
      if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
        SimplifiedValues[&SI] = C;
        return true;
      }
    }
    return Base::visitSelectInst(SI);
  }

  // Condition is either all 1s or all 0s. SI can be simplified.
  if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
    SimplifiedValues[&SI] = SelectedC;
    return true;
  }

  if (!CheckSROA)
    return true;

  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(SelectedV);
  if (BaseAndOffset.first) {
    ConstantOffsetPtrs[&SI] = BaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
      SROAArgValues[&SI] = SROAArg;
  }

  return true;
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Assume the most general case where the switch is lowered into
  // either a jump table, bit test, or a balanced binary tree consisting of
  // case clusters without merging adjacent clusters with the same
  // destination. We do not consider the switches that are lowered with a mix
  // of jump table/bit test/binary search tree. The cost of the switch is
  // proportional to the size of the tree or the size of jump table range.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
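  // As a rough illustration: a dense switch over cases 0..15 would typically
  // count as a single jump-table cluster with a 16-entry range, whereas a
  // sparse switch over {0, 100, 10000} would count as three clusters lowered
  // to a small balanced tree of compares.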

  unsigned JumpTableSize = 0;
  BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
  unsigned NumCaseCluster =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);

  onFinalizeSwitch(JumpTableSize, NumCaseCluster);
  return false;
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}

bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable, as they have the lowest possible impact on both runtime
  // and code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free ==
      TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (const Use &Op : I.operands())
    disableSROA(Op);

  return false;
}

/// Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
InlineResult
CallAnalyzer::analyzeBlock(BasicBlock *BB,
                           SmallPtrSetImpl<const Value *> &EphValues) {
  for (Instruction &I : *BB) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip pseudo-probes.
    if (isa<PseudoProbeInst>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    onInstructionAnalysisStart(&I);

    if (Base::visit(&I))
      ++NumInstructionsSimplified;
    else
      onMissedSimplification();

    onInstructionAnalysisFinish(&I);
    using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
    InlineResult IR = InlineResult::success();
    if (IsRecursiveCall)
      IR = InlineResult::failure("recursive");
    else if (ExposesReturnsTwice)
      IR = InlineResult::failure("exposes returns twice");
    else if (HasDynamicAlloca)
      IR = InlineResult::failure("dynamic alloca");
    else if (HasIndirectBr)
      IR = InlineResult::failure("indirect branch");
    else if (HasUninlineableIntrinsic)
      IR = InlineResult::failure("uninlinable intrinsic");
    else if (InitsVargArgs)
      IR = InlineResult::failure("varargs");
    if (!IR.isSuccess()) {
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " has uninlinable pattern ("
                 << NV("InlineResult", IR.getFailureReason())
                 << ") and cost is not fully computed";
        });
      return IR;
    }

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
      auto IR =
          InlineResult::failure("recursive and allocates too much stack space");
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " is "
                 << NV("InlineResult", IR.getFailureReason())
                 << ". Cost is not fully computed";
        });
      return IR;
    }

    if (shouldStop())
      return InlineResult::failure(
          "Call site analysis is not favorable to inlining.");
  }

  return InlineResult::success();
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns null if V is not a pointer, and returns the constant '0' if there
/// are no constant offsets applied.
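/// For example, given "getelementptr inbounds i32, i32* %a, i64 3", V is
/// rewritten to %a and the returned constant is 12 (3 elements of 4 bytes).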
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  unsigned AS = V->getType()->getPointerAddressSpace();
  unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IdxPtrTy = DL.getIndexType(V->getType());
  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
}

/// Find dead blocks due to deleted CFG edges during inlining.
///
/// If we know the successor of the current block, \p CurrBB, has to be \p
/// NextBB, the other successors of \p CurrBB are dead if these successors have
/// no live incoming CFG edges. If one block is found to be dead, we can
/// continue growing the dead block list by checking the successors of the dead
/// blocks to see if all their incoming edges are dead or not.
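/// For instance, if a conditional branch in %entry is known to always take its
/// %then successor, then %else is dead here unless some other live block still
/// branches to it, and anything reachable only via %else is dead as well.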
void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
  auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
    // A CFG edge is dead if the predecessor is dead or the predecessor has a
    // known successor which is not the one under exam.
    return (DeadBlocks.count(Pred) ||
            (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
  };

  auto IsNewlyDead = [&](BasicBlock *BB) {
    // If all the edges to a block are dead, the block is also dead.
    return (!DeadBlocks.count(BB) &&
            llvm::all_of(predecessors(BB),
                         [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
  };

  for (BasicBlock *Succ : successors(CurrBB)) {
    if (Succ == NextBB || !IsNewlyDead(Succ))
      continue;
    SmallVector<BasicBlock *, 4> NewDead;
    NewDead.push_back(Succ);
    while (!NewDead.empty()) {
      BasicBlock *Dead = NewDead.pop_back_val();
      if (DeadBlocks.insert(Dead))
        // Continue growing the dead block lists.
        for (BasicBlock *S : successors(Dead))
          if (IsNewlyDead(S))
            NewDead.push_back(S);
    }
  }
}

/// Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
InlineResult CallAnalyzer::analyze() {
  ++NumCallsAnalyzed;

  auto Result = onAnalysisStart();
  if (!Result.isSuccess())
    return Result;

  if (F.empty())
    return InlineResult::success();

  Function *Caller = CandidateCall.getFunction();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallBase *Call = dyn_cast<CallBase>(U);
    if (Call && Call->getFunction() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
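  // E.g., if the callsite passes the constant 42 for parameter %n, every use
  // of %n inside the callee is treated as the constant 42 for the rest of the
  // analysis, which may in turn fold branches and expose dead blocks.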
  auto CAI = CandidateCall.arg_begin();
  for (Argument &FAI : F.args()) {
    assert(CAI != CandidateCall.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
        SROAArgValues[&FAI] = SROAArg;
        onInitializeSROAArg(SROAArg);
        EnabledSROAAllocas.insert(SROAArg);
      }
    }
    ++CAI;
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, we use a small-size optimized SetVector, prioritizing
  // small iteration counts because we exit early once we cross our threshold.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());

  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    if (shouldStop())
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress with uses other than strictly callbr.
    // A blockaddress only has defined behavior for an indirect branch in the
    // same function, and we do not currently support inlining indirect
    // branches. But, the inliner may not see an indirect branch that ends up
    // being dead code at a particular call site. If the blockaddress escapes
    // the function, e.g., via a global variable, inlining may lead to an
    // invalid cross-function reference.
    // FIXME: pr/39560: continue relaxing this overt restriction.
    if (BB->hasAddressTaken())
      for (User *U : BlockAddress::get(&*BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    InlineResult IR = analyzeBlock(BB, EphValues);
    if (!IR.isSuccess())
      return IR;

    Instruction *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
          BBWorklist.insert(NextBB);
          KnownSuccessors[BB] = NextBB;
          findDeadBlocks(BB, NextBB);
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
        BBWorklist.insert(NextBB);
        KnownSuccessors[BB] = NextBB;
        findDeadBlocks(BB, NextBB);
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    onBlockAnalyzed(BB);
  }

  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
                                    &F == CandidateCall.getCalledFunction();
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return InlineResult::failure("noduplicate");

  return finalizeAnalysis();
}

void InlineCostCallAnalyzer::print() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
  if (PrintInstructionComments)
    F.print(dbgs(), &Writer);
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(LoadEliminationCost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump stats about this call's analysis.
LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() { print(); }
#endif

/// Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(
    Function *Caller, Function *Callee, TargetTransformInfo &TTI,
    function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
  // Note that CalleeTLI must be a copy not a reference. The legacy pass
  // manager caches the most recently created TLI in the
  // TargetLibraryInfoWrapperPass object, and always returns the same object
  // (which is overwritten on each GetTLI call). Therefore we copy the first
  // result.
  auto CalleeTLI = GetTLI(*Callee);
  return TTI.areInlineCompatible(Caller, Callee) &&
         GetTLI(*Caller).areInlineCompatible(CalleeTLI,
                                             InlineCallerSupersetNoBuiltin) &&
         AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}

int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
  int Cost = 0;
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
    if (Call.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned AS = PTy->getAddressSpace();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
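      // E.g., a 256-bit byval struct on a target with 64-bit pointers needs
      // NumStores = (256 + 63) / 64 = 4, for a cost of 2 * 4 * InstrCost.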
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost += 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost += InlineConstants::InstrCost;
    }
  }
  // The call instruction also disappears after inlining.
  Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
  return Cost;
}

InlineCost llvm::getInlineCost(
    CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
                       GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
}

Optional<int> llvm::getInliningCostEstimate(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  const InlineParams Params = {/* DefaultThreshold*/ 0,
                               /*HintThreshold*/ {},
                               /*ColdThreshold*/ {},
                               /*OptSizeThreshold*/ {},
                               /*OptMinSizeThreshold*/ {},
                               /*HotCallSiteThreshold*/ {},
                               /*LocallyHotCallSiteThreshold*/ {},
                               /*ColdCallSiteThreshold*/ {},
                               /*ComputeFullInlineCost*/ true,
                               /*EnableDeferral*/ true};

  InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE, true,
                            /*IgnoreThreshold*/ true);
  auto R = CA.analyze();
  if (!R.isSuccess())
    return None;
  return CA.getCost();
}

Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
    CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {

  // Cannot inline indirect calls.
  if (!Callee)
    return InlineResult::failure("indirect call");

  // When a callee coroutine function is inlined into a caller coroutine
  // function before the coro-split pass, the coro-early pass cannot handle
  // this well. So we won't inline a coroutine function that has not yet been
  // split.
  if (Callee->isPresplitCoroutine())
    return InlineResult::failure("unsplited coroutine call");

  // Never inline calls with byval arguments that do not have the alloca
  // address space. Since byval arguments can be replaced with a copy to an
  // alloca, the inlined code would need to be adjusted to handle that the
  // argument is in the alloca address space (so it is a little bit complicated
  // to solve).
  unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
    if (Call.isByValArgument(I)) {
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      if (PTy->getAddressSpace() != AllocaAS)
        return InlineResult::failure("byval arguments without alloca"
                                     " address space");
    }

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Call.hasFnAttr(Attribute::AlwaysInline)) {
    auto IsViable = isInlineViable(*Callee);
    if (IsViable.isSuccess())
      return InlineResult::success();
    return InlineResult::failure(IsViable.getFailureReason());
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  Function *Caller = Call.getCaller();
  if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
    return InlineResult::failure("conflicting attributes");

  // Don't inline this call if the caller has the optnone attribute.
  if (Caller->hasOptNone())
    return InlineResult::failure("optnone attribute");

  // Don't inline a function that treats null pointer as valid into a caller
  // that does not have this attribute.
  if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
    return InlineResult::failure("nullptr definitions incompatible");

  // Don't inline functions which can be interposed at link-time.
  if (Callee->isInterposable())
    return InlineResult::failure("interposable");

  // Don't inline functions marked noinline.
  if (Callee->hasFnAttribute(Attribute::NoInline))
    return InlineResult::failure("noinline function attribute");

  // Don't inline call sites marked noinline.
  if (Call.isNoInline())
    return InlineResult::failure("noinline call site attribute");

  // Don't inline functions if one does not have any stack protector attribute
  // but the other does.
  if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected caller but callee requested no stack protector");
  if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected callee but caller requested no stack protector");

  return None;
}

InlineCost llvm::getInlineCost(
    CallBase &Call, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {

  auto UserDecision =
      llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);

  if (UserDecision.hasValue()) {
    if (UserDecision->isSuccess())
      return llvm::InlineCost::getAlways("always inline attribute");
    return llvm::InlineCost::getNever(UserDecision->getFailureReason());
  }

  LLVM_DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                          << "... (caller:" << Call.getCaller()->getName()
                          << ")\n");

  InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE);
  InlineResult ShouldInline = CA.analyze();

  LLVM_DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever(ShouldInline.getFailureReason());
  if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways("empty function");

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

InlineResult llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (BasicBlock &BB : F) {
    // Disallow inlining of functions which contain indirect branches.
    if (isa<IndirectBrInst>(BB.getTerminator()))
      return InlineResult::failure("contains indirect branches");

    // Disallow inlining of blockaddresses which are used by non-callbr
    // instructions.
    if (BB.hasAddressTaken())
      for (User *U : BlockAddress::get(&BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    for (auto &II : BB) {
      CallBase *Call = dyn_cast<CallBase>(&II);
      if (!Call)
        continue;

      // Disallow recursive calls.
      Function *Callee = Call->getCalledFunction();
      if (&F == Callee)
        return InlineResult::failure("recursive call");

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && isa<CallInst>(Call) &&
          cast<CallInst>(Call)->canReturnTwice())
        return InlineResult::failure("exposes returns-twice attribute");

      if (Callee)
        switch (Callee->getIntrinsicID()) {
        default:
          break;
        case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because the current
          // backend can't separate call targets from call arguments.
          return InlineResult::failure(
              "disallowed inlining of @llvm.icall.branch.funnel");
        case llvm::Intrinsic::localescape:
          // Disallow inlining functions that call @llvm.localescape. Doing
          // this correctly would require major changes to the inliner.
          return InlineResult::failure(
              "disallowed inlining of @llvm.localescape");
        case llvm::Intrinsic::vastart:
          // Disallow inlining of functions that initialize VarArgs with
          // va_start.
          return InlineResult::failure(
              "contains VarArgs initialized with va_start");
        }
    }
  }

  return InlineResult::success();
}

// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  // If the -inline-threshold flag is explicitly specified, that is used
  // irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;

  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // If the -locally-hot-callsite-threshold is explicitly specified, use it to
  // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
  // we know that optimization level is O3 (in the getInlineParams variant that
  // takes the opt and size levels).
  // FIXME: Remove this check (and make the assignment unconditional) after
  // addressing size regression issues at O2.
  if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If the -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}

InlineParams llvm::getInlineParams() {
  return getInlineParams(DefaultThreshold);
}

// Compute the default threshold for inlining based on the opt level and the
// size opt level.
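// For example, -O3 maps to OptAggressiveThreshold, -Os (SizeOptLevel == 1) to
// OptSizeThreshold, -Oz (SizeOptLevel == 2) to OptMinSizeThreshold, and
// anything else to DefaultThreshold.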
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return DefaultThreshold;
}

InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  auto Params =
      getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
  // At O3, use the value of -locally-hot-callsite-threshold option to populate
  // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
  // when it is specified explicitly.
  if (OptLevel > 2)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
  return Params;
}

PreservedAnalyses
InlineCostAnnotationPrinterPass::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  PrintInstructionComments = true;
  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  Module *M = F.getParent();
  ProfileSummaryInfo PSI(*M);
  DataLayout DL(M);
  TargetTransformInfo TTI(DL);
  // FIXME: Redesign the usage of InlineParams to expand the scope of this
  // pass. In the current implementation, the type of InlineParams doesn't
  // matter as the pass serves only for verification of inliner's decisions.
  // We can add a flag which determines InlineParams for this run. Right now,
  // the default InlineParams are used.
  const InlineParams Params = llvm::getInlineParams();
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        Function *CalledFunction = CI->getCalledFunction();
        if (!CalledFunction || CalledFunction->isDeclaration())
          continue;
        OptimizationRemarkEmitter ORE(CalledFunction);
        InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
                                    GetAssumptionCache, nullptr, &PSI, &ORE);
        ICCA.analyze();
        OS << " Analyzing call of " << CalledFunction->getName()
           << "... (caller:" << CI->getCaller()->getName() << ")\n";
        ICCA.print();
      }
    }
  }
  return PreservedAnalyses::all();
}