1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Transforms/Utils/Cloning.h"
64 #include "llvm/Transforms/Utils/ValueMapper.h"
65 #include <algorithm>
66 #include <cassert>
67 #include <cstdint>
68 #include <iterator>
69 #include <limits>
70 #include <string>
71 #include <utility>
72 #include <vector>
73
74 using namespace llvm;
75 using ProfileCount = Function::ProfileCount;
76
77 static cl::opt<bool>
78 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
79 cl::Hidden,
80 cl::desc("Convert noalias attributes to metadata during inlining."));
81
82 static cl::opt<bool>
83 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
84 cl::init(true), cl::Hidden,
85 cl::desc("Convert align attributes to assumptions during inlining."));
86
87 llvm::InlineResult llvm::InlineFunction(CallBase *CB, InlineFunctionInfo &IFI,
88 AAResults *CalleeAAR,
89 bool InsertLifetime) {
90 return InlineFunction(CallSite(CB), IFI, CalleeAAR, InsertLifetime);
91 }
92
93 namespace {
94
95 /// A class for recording information about inlining a landing pad.
96 class LandingPadInliningInfo {
97 /// Destination of the invoke's unwind.
98 BasicBlock *OuterResumeDest;
99
100 /// Destination for the callee's resume.
101 BasicBlock *InnerResumeDest = nullptr;
102
103 /// LandingPadInst associated with the invoke.
104 LandingPadInst *CallerLPad = nullptr;
105
106 /// PHI for EH values from landingpad insts.
107 PHINode *InnerEHValuesPHI = nullptr;
108
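/// Values that came into the unwind destination's PHI nodes from the
/// original invoke's block, saved before that edge is removed.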
109 SmallVector<Value*, 8> UnwindDestPHIValues;
110
111 public:
112 LandingPadInliningInfo(InvokeInst *II)
113 : OuterResumeDest(II->getUnwindDest()) {
114 // If there are PHI nodes in the unwind destination block, we need to keep
115 // track of which values came into them from the invoke before removing
116 // the edge from this block.
117 BasicBlock *InvokeBB = II->getParent();
118 BasicBlock::iterator I = OuterResumeDest->begin();
119 for (; isa<PHINode>(I); ++I) {
120 // Save the value to use for this edge.
121 PHINode *PHI = cast<PHINode>(I);
122 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
123 }
124
125 CallerLPad = cast<LandingPadInst>(I);
126 }
127
128 /// The outer unwind destination is the target of
129 /// unwind edges introduced for calls within the inlined function.
130 BasicBlock *getOuterResumeDest() const {
131 return OuterResumeDest;
132 }
133
134 BasicBlock *getInnerResumeDest();
135
136 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
137
138 /// Forward the 'resume' instruction to the caller's landing pad block.
139 /// When the landing pad block has only one predecessor, this is
140 /// a simple branch. When there is more than one predecessor, we need to
141 /// split the landing pad block after the landingpad instruction and jump
142 /// to there.
143 void forwardResume(ResumeInst *RI,
144 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
145
146 /// Add incoming-PHI values to the unwind destination block for the given
147 /// basic block, using the values for the original invoke's source block.
148 void addIncomingPHIValuesFor(BasicBlock *BB) const {
149 addIncomingPHIValuesForInto(BB, OuterResumeDest);
150 }
151
152 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
153 BasicBlock::iterator I = dest->begin();
154 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
155 PHINode *phi = cast<PHINode>(I);
156 phi->addIncoming(UnwindDestPHIValues[i], src);
157 }
158 }
159 };
160
161 } // end anonymous namespace
162
163 /// Get or create a target for the branch from ResumeInsts.
164 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
165 if (InnerResumeDest) return InnerResumeDest;
166
167 // Split the landing pad.
168 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
169 InnerResumeDest =
170 OuterResumeDest->splitBasicBlock(SplitPoint,
171 OuterResumeDest->getName() + ".body");
172
173 // The number of incoming edges we expect to the inner landing pad.
174 const unsigned PHICapacity = 2;
175
176 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
177 Instruction *InsertPoint = &InnerResumeDest->front();
178 BasicBlock::iterator I = OuterResumeDest->begin();
179 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
180 PHINode *OuterPHI = cast<PHINode>(I);
181 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
182 OuterPHI->getName() + ".lpad-body",
183 InsertPoint);
184 OuterPHI->replaceAllUsesWith(InnerPHI);
185 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
186 }
187
188 // Create a PHI for the exception values.
189 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
190 "eh.lpad-body", InsertPoint);
191 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
192 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
193
194 // All done.
195 return InnerResumeDest;
196 }
197
198 /// Forward the 'resume' instruction to the caller's landing pad block.
199 /// When the landing pad block has only one predecessor, this is a simple
200 /// branch. When there is more than one predecessor, we need to split the
201 /// landing pad block after the landingpad instruction and jump to there.
202 void LandingPadInliningInfo::forwardResume(
203 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
204 BasicBlock *Dest = getInnerResumeDest();
205 BasicBlock *Src = RI->getParent();
206
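// Replace the resume with an unconditional branch into the inner
// landing-pad body; the resume instruction itself is erased below.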
207 BranchInst::Create(Dest, Src);
208
209 // Update the PHIs in the destination. They were inserted in an order which
210 // makes this work.
211 addIncomingPHIValuesForInto(Src, Dest);
212
213 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
214 RI->eraseFromParent();
215 }
216
217 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
218 static Value *getParentPad(Value *EHPad) {
219 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
220 return FPI->getParentPad();
221 return cast<CatchSwitchInst>(EHPad)->getParentPad();
222 }
223
224 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
225
226 /// Helper for getUnwindDestToken that does the descendant-ward part of
227 /// the search.
228 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
229 UnwindDestMemoTy &MemoMap) {
230 SmallVector<Instruction *, 8> Worklist(1, EHPad);
231
232 while (!Worklist.empty()) {
233 Instruction *CurrentPad = Worklist.pop_back_val();
234 // We only put pads on the worklist that aren't in the MemoMap. When
235 // we find an unwind dest for a pad we may update its ancestors, but
236 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
237 // so they should never get updated while queued on the worklist.
238 assert(!MemoMap.count(CurrentPad));
239 Value *UnwindDestToken = nullptr;
240 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
241 if (CatchSwitch->hasUnwindDest()) {
242 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
243 } else {
244 // Catchswitch doesn't have a 'nounwind' variant, and one might be
245 // annotated as "unwinds to caller" when really it's nounwind (see
246 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
247 // parent's unwind dest from this. We can check its catchpads'
248 // descendants, since they might include a cleanuppad with an
249 // "unwinds to caller" cleanupret, which can be trusted.
250 for (auto HI = CatchSwitch->handler_begin(),
251 HE = CatchSwitch->handler_end();
252 HI != HE && !UnwindDestToken; ++HI) {
253 BasicBlock *HandlerBlock = *HI;
254 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
255 for (User *Child : CatchPad->users()) {
256 // Intentionally ignore invokes here -- since the catchswitch is
257 // marked "unwind to caller", it would be a verifier error if it
258 // contained an invoke which unwinds out of it, so any invoke we'd
259 // encounter must unwind to some child of the catch.
260 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
261 continue;
262
263 Instruction *ChildPad = cast<Instruction>(Child);
264 auto Memo = MemoMap.find(ChildPad);
265 if (Memo == MemoMap.end()) {
266 // Haven't figured out this child pad yet; queue it.
267 Worklist.push_back(ChildPad);
268 continue;
269 }
270 // We've already checked this child, but might have found that
271 // it offers no proof either way.
272 Value *ChildUnwindDestToken = Memo->second;
273 if (!ChildUnwindDestToken)
274 continue;
275 // We already know the child's unwind dest, which can either
276 // be ConstantTokenNone to indicate unwind to caller, or can
277 // be another child of the catchpad. Only the former indicates
278 // the unwind dest of the catchswitch.
279 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
280 UnwindDestToken = ChildUnwindDestToken;
281 break;
282 }
283 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
284 }
285 }
286 }
287 } else {
288 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
289 for (User *U : CleanupPad->users()) {
290 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
291 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
292 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
293 else
294 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
295 break;
296 }
297 Value *ChildUnwindDestToken;
298 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
299 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
300 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
301 Instruction *ChildPad = cast<Instruction>(U);
302 auto Memo = MemoMap.find(ChildPad);
303 if (Memo == MemoMap.end()) {
304 // Haven't resolved this child yet; queue it and keep searching.
305 Worklist.push_back(ChildPad);
306 continue;
307 }
308 // We've checked this child, but still need to ignore it if it
309 // had no proof either way.
310 ChildUnwindDestToken = Memo->second;
311 if (!ChildUnwindDestToken)
312 continue;
313 } else {
314 // Not a relevant user of the cleanuppad
315 continue;
316 }
317 // In a well-formed program, the child/invoke must either unwind to
318 // an(other) child of the cleanup, or exit the cleanup. In the
319 // first case, continue searching.
320 if (isa<Instruction>(ChildUnwindDestToken) &&
321 getParentPad(ChildUnwindDestToken) == CleanupPad)
322 continue;
323 UnwindDestToken = ChildUnwindDestToken;
324 break;
325 }
326 }
327 // If we haven't found an unwind dest for CurrentPad, we may have queued its
328 // children, so move on to the next in the worklist.
329 if (!UnwindDestToken)
330 continue;
331
332 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
333 // any ancestors of CurrentPad up to but not including UnwindDestToken's
334 // parent pad. Record this in the memo map, and check to see if the
335 // original EHPad being queried is one of the ones exited.
336 Value *UnwindParent;
337 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
338 UnwindParent = getParentPad(UnwindPad);
339 else
340 UnwindParent = nullptr;
341 bool ExitedOriginalPad = false;
342 for (Instruction *ExitedPad = CurrentPad;
343 ExitedPad && ExitedPad != UnwindParent;
344 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
345 // Skip over catchpads since they just follow their catchswitches.
346 if (isa<CatchPadInst>(ExitedPad))
347 continue;
348 MemoMap[ExitedPad] = UnwindDestToken;
349 ExitedOriginalPad |= (ExitedPad == EHPad);
350 }
351
352 if (ExitedOriginalPad)
353 return UnwindDestToken;
354
355 // Continue the search.
356 }
357
358 // No definitive information is contained within this funclet.
359 return nullptr;
360 }
361
362 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
363 /// return that pad instruction. If it unwinds to caller, return
364 /// ConstantTokenNone. If it does not have a definitive unwind destination,
365 /// return nullptr.
366 ///
367 /// This routine gets invoked for calls in funclets in inlinees when inlining
368 /// an invoke. Since many funclets don't have calls inside them, it's queried
369 /// on-demand rather than building a map of pads to unwind dests up front.
370 /// Determining a funclet's unwind dest may require recursively searching its
371 /// descendants, and also ancestors and cousins if the descendants don't provide
372 /// an answer. Since most funclets will have their unwind dest immediately
373 /// available as the unwind dest of a catchswitch or cleanupret, this routine
374 /// searches top-down from the given pad and then up. To avoid worst-case
375 /// quadratic run-time given that approach, it uses a memo map to avoid
376 /// re-processing funclet trees. The callers that rewrite the IR as they go
377 /// take advantage of this, for correctness, by checking/forcing rewritten
378 /// pads' entries to match the original callee view.
379 static Value *getUnwindDestToken(Instruction *EHPad,
380 UnwindDestMemoTy &MemoMap) {
381 // Catchpads unwind to the same place as their catchswitch;
382 // redirect any queries on catchpads so the code below can
383 // deal with just catchswitches and cleanuppads.
384 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
385 EHPad = CPI->getCatchSwitch();
386
387 // Check if we've already determined the unwind dest for this pad.
388 auto Memo = MemoMap.find(EHPad);
389 if (Memo != MemoMap.end())
390 return Memo->second;
391
392 // Search EHPad and, if necessary, its descendants.
393 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
394 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
395 if (UnwindDestToken)
396 return UnwindDestToken;
397
398 // No information is available for this EHPad from itself or any of its
399 // descendants. An unwind all the way out to a pad in the caller would
400 // need also to agree with the unwind dest of the parent funclet, so
401 // search up the chain to try to find a funclet with information. Put
402 // null entries in the memo map to avoid re-processing as we go up.
403 MemoMap[EHPad] = nullptr;
404 #ifndef NDEBUG
405 SmallPtrSet<Instruction *, 4> TempMemos;
406 TempMemos.insert(EHPad);
407 #endif
408 Instruction *LastUselessPad = EHPad;
409 Value *AncestorToken;
410 for (AncestorToken = getParentPad(EHPad);
411 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
412 AncestorToken = getParentPad(AncestorToken)) {
413 // Skip over catchpads since they just follow their catchswitches.
414 if (isa<CatchPadInst>(AncestorPad))
415 continue;
416 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
417 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
418 // call to getUnwindDestToken, that would mean that AncestorPad had no
419 // information in itself, its descendants, or its ancestors. If that
420 // were the case, then we should also have recorded the lack of information
421 // for the descendant that we're coming from. So assert that we don't
422 // find a null entry in the MemoMap for AncestorPad.
423 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
424 auto AncestorMemo = MemoMap.find(AncestorPad);
425 if (AncestorMemo == MemoMap.end()) {
426 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
427 } else {
428 UnwindDestToken = AncestorMemo->second;
429 }
430 if (UnwindDestToken)
431 break;
432 LastUselessPad = AncestorPad;
433 MemoMap[LastUselessPad] = nullptr;
434 #ifndef NDEBUG
435 TempMemos.insert(LastUselessPad);
436 #endif
437 }
438
439 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
440 // returned nullptr (and likewise for EHPad and any of its ancestors up to
441 // LastUselessPad), so LastUselessPad has no information from below. Since
442 // getUnwindDestTokenHelper must investigate all downward paths through
443 // no-information nodes to prove that a node has no information like this,
444 // and since any time it finds information it records it in the MemoMap for
445 // not just the immediately-containing funclet but also any ancestors also
446 // exited, it must be the case that, walking downward from LastUselessPad,
447 // visiting just those nodes which have not been mapped to an unwind dest
448 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
449 // they are just used to keep getUnwindDestTokenHelper from repeating work),
450 // any node visited must have been exhaustively searched with no information
451 // for it found.
452 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
453 while (!Worklist.empty()) {
454 Instruction *UselessPad = Worklist.pop_back_val();
455 auto Memo = MemoMap.find(UselessPad);
456 if (Memo != MemoMap.end() && Memo->second) {
457 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
458 // that it is a funclet that does have information about unwinding to
459 // a particular destination; its parent was a useless pad.
460 // Since its parent has no information, the unwind edge must not escape
461 // the parent, and must target a sibling of this pad. This local unwind
462 // gives us no information about EHPad. Leave it and the subtree rooted
463 // at it alone.
464 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
465 continue;
466 }
467 // We know we don't have information for UselessPad. If it has an entry in
468 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
469 // added on this invocation of getUnwindDestToken; if a previous invocation
470 // recorded nullptr, it would have had to prove that the ancestors of
471 // UselessPad, which include LastUselessPad, had no information, and that
472 // in turn would have required proving that the descendants of
473 // LastUselessPad, which include EHPad, have no information about
474 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
475 // the MemoMap on that invocation, which isn't the case if we got here.
476 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
477 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
478 // information that we'd be contradicting by making a map entry for it
479 // (which is something that getUnwindDestTokenHelper must have proved for
480 // us to get here). Just assert on its direct users here; the checks in
481 // this downward walk at its descendants will verify that they don't have
482 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
483 // unwind edges or unwind to a sibling).
484 MemoMap[UselessPad] = UnwindDestToken;
485 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
486 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
487 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
488 auto *CatchPad = HandlerBlock->getFirstNonPHI();
489 for (User *U : CatchPad->users()) {
490 assert(
491 (!isa<InvokeInst>(U) ||
492 (getParentPad(
493 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
494 CatchPad)) &&
495 "Expected useless pad");
496 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
497 Worklist.push_back(cast<Instruction>(U));
498 }
499 }
500 } else {
501 assert(isa<CleanupPadInst>(UselessPad));
502 for (User *U : UselessPad->users()) {
503 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
504 assert((!isa<InvokeInst>(U) ||
505 (getParentPad(
506 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
507 UselessPad)) &&
508 "Expected useless pad");
509 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
510 Worklist.push_back(cast<Instruction>(U));
511 }
512 }
513 }
514
515 return UnwindDestToken;
516 }
517
518 /// When we inline a basic block into an invoke,
519 /// we have to turn all of the calls that can throw into invokes.
520 // This function analyzes BB to see if there are any calls, and if so,
521 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
522 /// nodes in that block with the values specified in InvokeDestPHIValues.
523 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
524 BasicBlock *BB, BasicBlock *UnwindEdge,
525 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
526 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
527 Instruction *I = &*BBI++;
528
529 // We only need to check for function calls: inlined invoke
530 // instructions require no special handling.
531 CallInst *CI = dyn_cast<CallInst>(I);
532
533 if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
534 continue;
535
536 // We do not need to (and in fact, cannot) convert possibly throwing calls
537 // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
538 // invokes. The caller's "segment" of the deoptimization continuation
539 // attached to the newly inlined @llvm.experimental_deoptimize
540 // (resp. @llvm.experimental.guard) call should contain the exception
541 // handling logic, if any.
542 if (auto *F = CI->getCalledFunction())
543 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
544 F->getIntrinsicID() == Intrinsic::experimental_guard)
545 continue;
546
547 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
548 // This call is nested inside a funclet. If that funclet has an unwind
549 // destination within the inlinee, then unwinding out of this call would
550 // be UB. Rewriting this call to an invoke which targets the inlined
551 // invoke's unwind dest would give the call's parent funclet multiple
552 // unwind destinations, which is something that subsequent EH table
553 // generation can't handle and that the verifier rejects. So when we
554 // see such a call, leave it as a call.
555 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
556 Value *UnwindDestToken =
557 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
558 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
559 continue;
560 #ifndef NDEBUG
561 Instruction *MemoKey;
562 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
563 MemoKey = CatchPad->getCatchSwitch();
564 else
565 MemoKey = FuncletPad;
566 assert(FuncletUnwindMap->count(MemoKey) &&
567 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
568 "must get memoized to avoid confusing later searches");
569 #endif // NDEBUG
570 }
571
572 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
573 return BB;
574 }
575 return nullptr;
576 }
577
578 /// If we inlined an invoke site, we need to convert calls
579 /// in the body of the inlined function into invokes.
580 ///
581 /// II is the invoke instruction being inlined. FirstNewBlock is the first
582 /// block of the inlined code (the last block is the end of the function),
583 /// and InlineCodeInfo is information about the code that got inlined.
584 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
585 ClonedCodeInfo &InlinedCodeInfo) {
586 BasicBlock *InvokeDest = II->getUnwindDest();
587
588 Function *Caller = FirstNewBlock->getParent();
589
590 // The inlined code is currently at the end of the function; scan from the
591 // start of the inlined code to its end, checking for stuff we need to
592 // rewrite.
593 LandingPadInliningInfo Invoke(II);
594
595 // Get all of the inlined landing pad instructions.
596 SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
597 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
598 I != E; ++I)
599 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
600 InlinedLPads.insert(II->getLandingPadInst());
601
602 // Append the clauses from the outer landing pad instruction into the inlined
603 // landing pad instructions.
604 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
605 for (LandingPadInst *InlinedLPad : InlinedLPads) {
606 unsigned OuterNum = OuterLPad->getNumClauses();
607 InlinedLPad->reserveClauses(OuterNum);
608 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
609 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
610 if (OuterLPad->isCleanup())
611 InlinedLPad->setCleanup(true);
612 }
613
614 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
615 BB != E; ++BB) {
616 if (InlinedCodeInfo.ContainsCalls)
617 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
618 &*BB, Invoke.getOuterResumeDest()))
619 // Update any PHI nodes in the exceptional block to indicate that there
620 // is now a new entry in them.
621 Invoke.addIncomingPHIValuesFor(NewBB);
622
623 // Forward any resumes that are remaining here.
624 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
625 Invoke.forwardResume(RI, InlinedLPads);
626 }
627
628 // Now that everything is happy, we have one final detail. The PHI nodes in
629 // the exception destination block still have entries due to the original
630 // invoke instruction. Eliminate these entries (which might even delete the
631 // PHI node) now.
632 InvokeDest->removePredecessor(II->getParent());
633 }
634
635 /// If we inlined an invoke site, we need to convert calls
636 /// in the body of the inlined function into invokes.
637 ///
638 /// II is the invoke instruction being inlined. FirstNewBlock is the first
639 /// block of the inlined code (the last block is the end of the function),
640 /// and InlineCodeInfo is information about the code that got inlined.
641 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
642 ClonedCodeInfo &InlinedCodeInfo) {
643 BasicBlock *UnwindDest = II->getUnwindDest();
644 Function *Caller = FirstNewBlock->getParent();
645
646 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
647
648 // If there are PHI nodes in the unwind destination block, we need to keep
649 // track of which values came into them from the invoke before removing the
650 // edge from this block.
651 SmallVector<Value *, 8> UnwindDestPHIValues;
652 BasicBlock *InvokeBB = II->getParent();
653 for (Instruction &I : *UnwindDest) {
654 // Save the value to use for this edge.
655 PHINode *PHI = dyn_cast<PHINode>(&I);
656 if (!PHI)
657 break;
658 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
659 }
660
661 // Add incoming-PHI values to the unwind destination block for the given basic
662 // block, using the values for the original invoke's source block.
663 auto UpdatePHINodes = [&](BasicBlock *Src) {
664 BasicBlock::iterator I = UnwindDest->begin();
665 for (Value *V : UnwindDestPHIValues) {
666 PHINode *PHI = cast<PHINode>(I);
667 PHI->addIncoming(V, Src);
668 ++I;
669 }
670 };
671
672 // This connects all the instructions which 'unwind to caller' to the invoke
673 // destination.
674 UnwindDestMemoTy FuncletUnwindMap;
675 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
676 BB != E; ++BB) {
677 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
678 if (CRI->unwindsToCaller()) {
679 auto *CleanupPad = CRI->getCleanupPad();
680 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
681 CRI->eraseFromParent();
682 UpdatePHINodes(&*BB);
683 // Finding a cleanupret with an unwind destination would confuse
684 // subsequent calls to getUnwindDestToken, so map the cleanuppad
685 // to short-circuit any such calls and recognize this as an "unwind
686 // to caller" cleanup.
687 assert(!FuncletUnwindMap.count(CleanupPad) ||
688 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
689 FuncletUnwindMap[CleanupPad] =
690 ConstantTokenNone::get(Caller->getContext());
691 }
692 }
693
694 Instruction *I = BB->getFirstNonPHI();
695 if (!I->isEHPad())
696 continue;
697
698 Instruction *Replacement = nullptr;
699 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
700 if (CatchSwitch->unwindsToCaller()) {
701 Value *UnwindDestToken;
702 if (auto *ParentPad =
703 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
704 // This catchswitch is nested inside another funclet. If that
705 // funclet has an unwind destination within the inlinee, then
706 // unwinding out of this catchswitch would be UB. Rewriting this
707 // catchswitch to unwind to the inlined invoke's unwind dest would
708 // give the parent funclet multiple unwind destinations, which is
709 // something that subsequent EH table generation can't handle and
710 // that the verifier rejects. So when we see such a call, leave it
711 // as "unwind to caller".
712 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
713 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
714 continue;
715 } else {
716 // This catchswitch has no parent to inherit constraints from, and
717 // none of its descendants can have an unwind edge that exits it and
718 // targets another funclet in the inlinee. It may or may not have a
719 // descendant that definitively has an unwind to caller. In either
720 // case, we'll have to assume that any unwinds out of it may need to
721 // be routed to the caller, so treat it as though it has a definitive
722 // unwind to caller.
723 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
724 }
725 auto *NewCatchSwitch = CatchSwitchInst::Create(
726 CatchSwitch->getParentPad(), UnwindDest,
727 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
728 CatchSwitch);
729 for (BasicBlock *PadBB : CatchSwitch->handlers())
730 NewCatchSwitch->addHandler(PadBB);
731 // Propagate info for the old catchswitch over to the new one in
732 // the unwind map. This also serves to short-circuit any subsequent
733 // checks for the unwind dest of this catchswitch, which would get
734 // confused if they found the outer handler in the callee.
735 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
736 Replacement = NewCatchSwitch;
737 }
738 } else if (!isa<FuncletPadInst>(I)) {
739 llvm_unreachable("unexpected EHPad!");
740 }
741
742 if (Replacement) {
743 Replacement->takeName(I);
744 I->replaceAllUsesWith(Replacement);
745 I->eraseFromParent();
746 UpdatePHINodes(&*BB);
747 }
748 }
749
750 if (InlinedCodeInfo.ContainsCalls)
751 for (Function::iterator BB = FirstNewBlock->getIterator(),
752 E = Caller->end();
753 BB != E; ++BB)
754 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
755 &*BB, UnwindDest, &FuncletUnwindMap))
756 // Update any PHI nodes in the exceptional block to indicate that there
757 // is now a new entry in them.
758 UpdatePHINodes(NewBB);
759
760 // Now that everything is happy, we have one final detail. The PHI nodes in
761 // the exception destination block still have entries due to the original
762 // invoke instruction. Eliminate these entries (which might even delete the
763 // PHI node) now.
764 UnwindDest->removePredecessor(InvokeBB);
765 }
766
767 /// When inlining a call site that has !llvm.mem.parallel_loop_access or
768 /// llvm.access.group metadata, that metadata should be propagated to all
769 /// memory-accessing cloned instructions.
770 static void PropagateParallelLoopAccessMetadata(CallSite CS,
771 ValueToValueMapTy &VMap) {
772 MDNode *M =
773 CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
774 MDNode *CallAccessGroup =
775 CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
776 if (!M && !CallAccessGroup)
777 return;
778
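// Walk every value produced by cloning and attach the metadata to the
// memory-accessing cloned instructions.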
779 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
780 VMI != VMIE; ++VMI) {
781 if (!VMI->second)
782 continue;
783
784 Instruction *NI = dyn_cast<Instruction>(VMI->second);
785 if (!NI)
786 continue;
787
788 if (M) {
789 if (MDNode *PM =
790 NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
791 M = MDNode::concatenate(PM, M);
792 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
793 } else if (NI->mayReadOrWriteMemory()) {
794 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
795 }
796 }
797
798 if (NI->mayReadOrWriteMemory()) {
799 MDNode *UnitedAccGroups = uniteAccessGroups(
800 NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
801 NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
802 }
803 }
804 }
805
806 /// When inlining a function that contains noalias scope metadata,
807 /// this metadata needs to be cloned so that the inlined blocks
808 /// have different "unique scopes" at every call site. Were this not done, then
809 /// aliasing scopes from a function inlined into a caller multiple times could
810 /// not be differentiated (and this would lead to miscompiles because the
811 /// non-aliasing property communicated by the metadata could have
812 /// call-site-specific control dependencies).
813 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
814 const Function *CalledFunc = CS.getCalledFunction();
815 SetVector<const MDNode *> MD;
816
817 // Note: We could only clone the metadata if it is already used in the
818 // caller. I'm omitting that check here because it might confuse
819 // inter-procedural alias analysis passes. We can revisit this if it becomes
820 // an efficiency or overhead problem.
821
822 for (const BasicBlock &I : *CalledFunc)
823 for (const Instruction &J : I) {
824 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
825 MD.insert(M);
826 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
827 MD.insert(M);
828 }
829
830 if (MD.empty())
831 return;
832
833 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
834 // the set.
835 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
836 while (!Queue.empty()) {
837 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
838 for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
839 if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
840 if (MD.insert(M1))
841 Queue.push_back(M1);
842 }
843
844 // Now we have a complete set of all metadata in the chains used to specify
845 // the noalias scopes and the lists of those scopes.
846 SmallVector<TempMDTuple, 16> DummyNodes;
847 DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
848 for (const MDNode *I : MD) {
849 DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
850 MDMap[I].reset(DummyNodes.back().get());
851 }
852
853 // Create new metadata nodes to replace the dummy nodes, replacing old
854 // metadata references with either a dummy node or an already-created new
855 // node.
856 for (const MDNode *I : MD) {
857 SmallVector<Metadata *, 4> NewOps;
858 for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
859 const Metadata *V = I->getOperand(i);
860 if (const MDNode *M = dyn_cast<MDNode>(V))
861 NewOps.push_back(MDMap[M]);
862 else
863 NewOps.push_back(const_cast<Metadata *>(V));
864 }
865
866 MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
867 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
868 assert(TempM->isTemporary() && "Expected temporary node");
869
870 TempM->replaceAllUsesWith(NewM);
871 }
872
873 // Now replace the metadata in the new inlined instructions with the
874 // replacements from the map.
875 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
876 VMI != VMIE; ++VMI) {
877 if (!VMI->second)
878 continue;
879
880 Instruction *NI = dyn_cast<Instruction>(VMI->second);
881 if (!NI)
882 continue;
883
884 if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
885 MDNode *NewMD = MDMap[M];
886 // If the call site also had alias scope metadata (a list of scopes to
887 // which instructions inside it might belong), propagate those scopes to
888 // the inlined instructions.
889 if (MDNode *CSM =
890 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
891 NewMD = MDNode::concatenate(NewMD, CSM);
892 NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
893 } else if (NI->mayReadOrWriteMemory()) {
894 if (MDNode *M =
895 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
896 NI->setMetadata(LLVMContext::MD_alias_scope, M);
897 }
898
899 if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
900 MDNode *NewMD = MDMap[M];
901 // If the call site also had noalias metadata (a list of scopes with
902 // which instructions inside it don't alias), propagate those scopes to
903 // the inlined instructions.
904 if (MDNode *CSM =
905 CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
906 NewMD = MDNode::concatenate(NewMD, CSM);
907 NI->setMetadata(LLVMContext::MD_noalias, NewMD);
908 } else if (NI->mayReadOrWriteMemory()) {
909 if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
910 NI->setMetadata(LLVMContext::MD_noalias, M);
911 }
912 }
913 }
914
915 /// If the inlined function has noalias arguments,
916 /// then add new alias scopes for each noalias argument, tag the mapped noalias
917 /// parameters with noalias metadata specifying the new scope, and tag all
918 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
919 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
920 const DataLayout &DL, AAResults *CalleeAAR) {
921 if (!EnableNoAliasConversion)
922 return;
923
924 const Function *CalledFunc = CS.getCalledFunction();
925 SmallVector<const Argument *, 4> NoAliasArgs;
926
927 for (const Argument &Arg : CalledFunc->args())
928 if (Arg.hasNoAliasAttr() && !Arg.use_empty())
929 NoAliasArgs.push_back(&Arg);
930
931 if (NoAliasArgs.empty())
932 return;
933
934 // To do a good job, if a noalias variable is captured, we need to know if
935 // the capture point dominates the particular use we're considering.
936 DominatorTree DT;
937 DT.recalculate(const_cast<Function&>(*CalledFunc));
938
939 // noalias indicates that pointer values based on the argument do not alias
940 // pointer values which are not based on it. So we add a new "scope" for each
941 // noalias function argument. Accesses using pointers based on that argument
942 // become part of that alias scope, accesses using pointers not based on that
943 // argument are tagged as noalias with that scope.
944
945 DenseMap<const Argument *, MDNode *> NewScopes;
946 MDBuilder MDB(CalledFunc->getContext());
947
948 // Create a new scope domain for this function.
949 MDNode *NewDomain =
950 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
951 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
952 const Argument *A = NoAliasArgs[i];
953
954 std::string Name = CalledFunc->getName();
955 if (A->hasName()) {
956 Name += ": %";
957 Name += A->getName();
958 } else {
959 Name += ": argument ";
960 Name += utostr(i);
961 }
962
963 // Note: We always create a new anonymous root here. This is true regardless
964 // of the linkage of the callee because the aliasing "scope" is not just a
965 // property of the callee, but also all control dependencies in the caller.
966 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
967 NewScopes.insert(std::make_pair(A, NewScope));
968 }
969
970 // Iterate over all new instructions in the map; for all memory-access
971 // instructions, add the alias scope metadata.
972 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
973 VMI != VMIE; ++VMI) {
974 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
975 if (!VMI->second)
976 continue;
977
978 Instruction *NI = dyn_cast<Instruction>(VMI->second);
979 if (!NI)
980 continue;
981
982 bool IsArgMemOnlyCall = false, IsFuncCall = false;
983 SmallVector<const Value *, 2> PtrArgs;
984
985 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
986 PtrArgs.push_back(LI->getPointerOperand());
987 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
988 PtrArgs.push_back(SI->getPointerOperand());
989 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
990 PtrArgs.push_back(VAAI->getPointerOperand());
991 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
992 PtrArgs.push_back(CXI->getPointerOperand());
993 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
994 PtrArgs.push_back(RMWI->getPointerOperand());
995 else if (const auto *Call = dyn_cast<CallBase>(I)) {
996 // If we know that the call does not access memory, then we'll still
997 // know that about the inlined clone of this call site, and we don't
998 // need to add metadata.
999 if (Call->doesNotAccessMemory())
1000 continue;
1001
1002 IsFuncCall = true;
1003 if (CalleeAAR) {
1004 FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1005 if (MRB == FMRB_OnlyAccessesArgumentPointees ||
1006 MRB == FMRB_OnlyReadsArgumentPointees)
1007 IsArgMemOnlyCall = true;
1008 }
1009
1010 for (Value *Arg : Call->args()) {
1011 // We need to check the underlying objects of all arguments, not just
1012 // the pointer arguments, because we might be passing pointers as
1013 // integers, etc.
1014 // However, if we know that the call only accesses pointer arguments,
1015 // then we only need to check the pointer arguments.
1016 if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1017 continue;
1018
1019 PtrArgs.push_back(Arg);
1020 }
1021 }
1022
1023 // If we found no pointers, then this instruction is not suitable for
1024 // pairing with an instruction to receive aliasing metadata.
1025 // However, if this is a call, then we might just alias with none of the
1026 // noalias arguments.
1027 if (PtrArgs.empty() && !IsFuncCall)
1028 continue;
1029
1030 // It is possible that there is only one underlying object, but you
1031 // need to go through several PHIs to see it, and thus could be
1032 // repeated in the Objects list.
1033 SmallPtrSet<const Value *, 4> ObjSet;
1034 SmallVector<Metadata *, 4> Scopes, NoAliases;
1035
1036 SmallSetVector<const Argument *, 4> NAPtrArgs;
1037 for (const Value *V : PtrArgs) {
1038 SmallVector<const Value *, 4> Objects;
1039 GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
1040
1041 for (const Value *O : Objects)
1042 ObjSet.insert(O);
1043 }
1044
1045 // Figure out if we're derived from anything that is not a noalias
1046 // argument.
1047 bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1048 for (const Value *V : ObjSet) {
1049 // Is this value a constant that cannot be derived from any pointer
1050 // value (we need to exclude constant expressions, for example, that
1051 // are formed from arithmetic on global symbols).
1052 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1053 isa<ConstantPointerNull>(V) ||
1054 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1055 if (IsNonPtrConst)
1056 continue;
1057
1058 // If this is anything other than a noalias argument, then we cannot
1059 // completely describe the aliasing properties using alias.scope
1060 // metadata (and, thus, won't add any).
1061 if (const Argument *A = dyn_cast<Argument>(V)) {
1062 if (!A->hasNoAliasAttr())
1063 UsesAliasingPtr = true;
1064 } else {
1065 UsesAliasingPtr = true;
1066 }
1067
1068 // If this is not some identified function-local object (which cannot
1069 // directly alias a noalias argument), or some other argument (which,
1070 // by definition, also cannot alias a noalias argument), then we could
1071 // alias a noalias argument that has been captured.
1072 if (!isa<Argument>(V) &&
1073 !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1074 CanDeriveViaCapture = true;
1075 }
1076
1077 // A function call can always get captured noalias pointers (via other
1078 // parameters, globals, etc.).
1079 if (IsFuncCall && !IsArgMemOnlyCall)
1080 CanDeriveViaCapture = true;
1081
1082 // First, we want to figure out all of the sets with which we definitely
1083 // don't alias. Iterate over all noalias sets, and add those for which:
1084 // 1. The noalias argument is not in the set of objects from which we
1085 // definitely derive.
1086 // 2. The noalias argument has not yet been captured.
1087 // An arbitrary function that might load pointers could see captured
1088 // noalias arguments via other noalias arguments or globals, and so we
1089 // must always check for prior capture.
1090 for (const Argument *A : NoAliasArgs) {
1091 if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1092 // It might be tempting to skip the
1093 // PointerMayBeCapturedBefore check if
1094 // A->hasNoCaptureAttr() is true, but this is
1095 // incorrect because nocapture only guarantees
1096 // that no copies outlive the function, not
1097 // that the value cannot be locally captured.
1098 !PointerMayBeCapturedBefore(A,
1099 /* ReturnCaptures */ false,
1100 /* StoreCaptures */ false, I, &DT)))
1101 NoAliases.push_back(NewScopes[A]);
1102 }
1103
1104 if (!NoAliases.empty())
1105 NI->setMetadata(LLVMContext::MD_noalias,
1106 MDNode::concatenate(
1107 NI->getMetadata(LLVMContext::MD_noalias),
1108 MDNode::get(CalledFunc->getContext(), NoAliases)));
1109
1110 // Next, we want to figure out all of the sets to which we might belong.
1111 // We might belong to a set if the noalias argument is in the set of
1112 // underlying objects. If there is some non-noalias argument in our list
1113 // of underlying objects, then we cannot add a scope because the fact
1114 // that some access does not alias with any set of our noalias arguments
1115 // cannot itself guarantee that it does not alias with this access
1116 // (because there is some pointer of unknown origin involved and the
1117 // other access might also depend on this pointer). We also cannot add
1118 // scopes to arbitrary functions unless we know they don't access any
1119 // non-parameter pointer-values.
1120 bool CanAddScopes = !UsesAliasingPtr;
1121 if (CanAddScopes && IsFuncCall)
1122 CanAddScopes = IsArgMemOnlyCall;
1123
1124 if (CanAddScopes)
1125 for (const Argument *A : NoAliasArgs) {
1126 if (ObjSet.count(A))
1127 Scopes.push_back(NewScopes[A]);
1128 }
1129
1130 if (!Scopes.empty())
1131 NI->setMetadata(
1132 LLVMContext::MD_alias_scope,
1133 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1134 MDNode::get(CalledFunc->getContext(), Scopes)));
1135 }
1136 }
1137 }
1138
1139 /// If the inlined function has non-byval align arguments, then
1140 /// add @llvm.assume-based alignment assumptions to preserve this information.
1141 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1142 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1143 return;
1144
1145 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1146 auto &DL = CS.getCaller()->getParent()->getDataLayout();
1147
1148 // To avoid inserting redundant assumptions, we should check for assumptions
1149 // already in the caller. To do this, we might need a DT of the caller.
1150 DominatorTree DT;
1151 bool DTCalculated = false;
1152
1153 Function *CalledFunc = CS.getCalledFunction();
1154 for (Argument &Arg : CalledFunc->args()) {
1155 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1156 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1157 if (!DTCalculated) {
1158 DT.recalculate(*CS.getCaller());
1159 DTCalculated = true;
1160 }
1161
1162 // If we can already prove the asserted alignment in the context of the
1163 // caller, then don't bother inserting the assumption.
1164 Value *ArgVal = CS.getArgument(Arg.getArgNo());
1165 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1166 continue;
1167
1168 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1169 .CreateAlignmentAssumption(DL, ArgVal, Align);
1170 AC->registerAssumption(NewAsmp);
1171 }
1172 }
1173 }
1174
1175 /// Once we have cloned code over from a callee into the caller,
1176 /// update the specified callgraph to reflect the changes we made.
1177 /// Note that it's possible that not all code was copied over, so only
1178 /// some edges of the callgraph may remain.
1179 static void UpdateCallGraphAfterInlining(CallSite CS,
1180 Function::iterator FirstNewBlock,
1181 ValueToValueMapTy &VMap,
1182 InlineFunctionInfo &IFI) {
1183 CallGraph &CG = *IFI.CG;
1184 const Function *Caller = CS.getCaller();
1185 const Function *Callee = CS.getCalledFunction();
1186 CallGraphNode *CalleeNode = CG[Callee];
1187 CallGraphNode *CallerNode = CG[Caller];
1188
1189 // Since we inlined some uninlined call sites in the callee into the caller,
1190 // add edges from the caller to all of the callees of the callee.
1191 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1192
1193 // Consider the case where CalleeNode == CallerNode.
1194 CallGraphNode::CalledFunctionsVector CallCache;
1195 if (CalleeNode == CallerNode) {
1196 CallCache.assign(I, E);
1197 I = CallCache.begin();
1198 E = CallCache.end();
1199 }
1200
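// Copy each of the callee's outgoing call edges whose call site was
// actually cloned into the caller.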
1201 for (; I != E; ++I) {
1202 const Value *OrigCall = I->first;
1203
1204 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1205 // Only copy the edge if the call was inlined!
1206 if (VMI == VMap.end() || VMI->second == nullptr)
1207 continue;
1208
1209 // If the call was inlined, but then constant folded, there is no edge to
1210 // add. Check for this case.
1211 auto *NewCall = dyn_cast<CallBase>(VMI->second);
1212 if (!NewCall)
1213 continue;
1214
1215 // We do not treat intrinsic calls like real function calls because we
1216 // expect them to become inline code; do not add an edge for an intrinsic.
1217 if (NewCall->getCalledFunction() &&
1218 NewCall->getCalledFunction()->isIntrinsic())
1219 continue;
1220
1221 // Remember that this call site got inlined for the client of
1222 // InlineFunction.
1223 IFI.InlinedCalls.push_back(NewCall);
1224
1225 // It's possible that inlining the callsite will cause it to go from an
1226 // indirect to a direct call by resolving a function pointer. If this
1227 // happens, set the callee of the new call site to a more precise
1228 // destination. This can also happen if the call graph node of the caller
1229 // was just unnecessarily imprecise.
1230 if (!I->second->getFunction())
1231 if (Function *F = NewCall->getCalledFunction()) {
1232 // Indirect call site resolved to direct call.
1233 CallerNode->addCalledFunction(NewCall, CG[F]);
1234
1235 continue;
1236 }
1237
1238 CallerNode->addCalledFunction(NewCall, I->second);
1239 }
1240
1241 // Update the call graph by deleting the edge from Callee to Caller. We must
1242 // do this after the loop above in case Caller and Callee are the same.
1243 CallerNode->removeCallEdgeFor(*cast<CallBase>(CS.getInstruction()));
1244 }
1245
1246 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1247 BasicBlock *InsertBlock,
1248 InlineFunctionInfo &IFI) {
1249 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1250 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1251
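// The number of bytes to copy is the store size of the byval aggregate type.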
1252 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1253
1254 // Always generate a memcpy of alignment 1 here because we don't know
1255 // the alignment of the src pointer. Other optimizations can infer
1256 // better alignment.
1257 Builder.CreateMemCpy(Dst, /*DstAlign*/ Align::None(), Src,
1258 /*SrcAlign*/ Align::None(), Size);
1259 }
1260
1261 /// When inlining a call site that has a byval argument,
1262 /// we have to make the implicit memcpy explicit by adding it.
1263 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1264 const Function *CalledFunc,
1265 InlineFunctionInfo &IFI,
1266 unsigned ByValAlignment) {
1267 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1268 Type *AggTy = ArgTy->getElementType();
1269
1270 Function *Caller = TheCall->getFunction();
1271 const DataLayout &DL = Caller->getParent()->getDataLayout();
1272
1273 // If the called function is readonly, then it could not mutate the caller's
1274 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1275 // temporary.
1276 if (CalledFunc->onlyReadsMemory()) {
1277 // If the byval argument has a specified alignment that is greater than the
1278 // passed in pointer, then we either have to round up the input pointer or
1279 // give up on this transformation.
1280 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1281 return Arg;
1282
1283 AssumptionCache *AC =
1284 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1285
1286 // If the pointer is already known to be sufficiently aligned, or if we can
1287 // round it up to a larger alignment, then we don't need a temporary.
1288 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1289 ByValAlignment)
1290 return Arg;
1291
1292 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1293 // for code quality, but rarely happens and is required for correctness.
1294 }
1295
1296 // Create the alloca. If we have DataLayout, use nice alignment.
1297 Align Alignment(DL.getPrefTypeAlignment(AggTy));
1298
1299 // If the byval had an alignment specified, we *must* use at least that
1300 // alignment, as it is required by the byval argument (and uses of the
1301 // pointer inside the callee).
1302 Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1303
1304 Value *NewAlloca =
1305 new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1306 Arg->getName(), &*Caller->begin()->begin());
1307 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1308
1309 // Uses of the argument in the function should use our new alloca
1310 // instead.
1311 return NewAlloca;
1312 }
1313
1314 // Check whether this Value is used by a lifetime intrinsic.
1315 static bool isUsedByLifetimeMarker(Value *V) {
1316 for (User *U : V->users())
1317 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1318 if (II->isLifetimeStartOrEnd())
1319 return true;
1320 return false;
1321 }
1322
1323 // Check whether the given alloca already has
1324 // lifetime.start or lifetime.end intrinsics.
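// Lifetime markers operate on i8* operands, so they are attached either to
// the alloca itself (when it is already an i8*) or to a bitcast of the alloca
// to i8*; both forms are checked for below.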
1325 static bool hasLifetimeMarkers(AllocaInst *AI) {
1326 Type *Ty = AI->getType();
1327 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1328 Ty->getPointerAddressSpace());
1329 if (Ty == Int8PtrTy)
1330 return isUsedByLifetimeMarker(AI);
1331
1332 // Do a scan to find all the casts to i8*.
1333 for (User *U : AI->users()) {
1334 if (U->getType() != Int8PtrTy) continue;
1335 if (U->stripPointerCasts() != AI) continue;
1336 if (isUsedByLifetimeMarker(U))
1337 return true;
1338 }
1339 return false;
1340 }
1341
1342 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1343 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1344 /// cannot be static.
1345 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1346 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1347 }
1348
1349 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1350 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1351 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1352 LLVMContext &Ctx,
1353 DenseMap<const MDNode *, MDNode *> &IANodes) {
1354 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1355 return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1356 IA);
1357 }
1358
1359 /// Returns the LoopID for a loop which has been cloned from another
1360 /// function for inlining with the new inlined-at start and end locs.
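/// The first operand of a loop ID is a self-reference, so the clone is built
/// with a placeholder operand and then patched to refer to itself.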
1361 static MDNode *inlineLoopID(const MDNode *OrigLoopId, DILocation *InlinedAt,
1362 LLVMContext &Ctx,
1363 DenseMap<const MDNode *, MDNode *> &IANodes) {
1364 assert(OrigLoopId && OrigLoopId->getNumOperands() > 0 &&
1365 "Loop ID needs at least one operand");
1366 assert(OrigLoopId && OrigLoopId->getOperand(0).get() == OrigLoopId &&
1367 "Loop ID should refer to itself");
1368
1369 // Save space for the self-referential LoopID.
1370 SmallVector<Metadata *, 4> MDs = {nullptr};
1371
1372 for (unsigned i = 1; i < OrigLoopId->getNumOperands(); ++i) {
1373 Metadata *MD = OrigLoopId->getOperand(i);
1374 // Update the DILocations to encode the inlined-at metadata.
1375 if (DILocation *DL = dyn_cast<DILocation>(MD))
1376 MDs.push_back(inlineDebugLoc(DL, InlinedAt, Ctx, IANodes));
1377 else
1378 MDs.push_back(MD);
1379 }
1380
1381 MDNode *NewLoopID = MDNode::getDistinct(Ctx, MDs);
1382 // Insert the self-referential LoopID.
1383 NewLoopID->replaceOperandWith(0, NewLoopID);
1384 return NewLoopID;
1385 }
1386
1387 /// Update inlined instructions' line numbers to encode the location where
1388 /// these instructions are inlined.
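/// Each inlined instruction either has its debug location rewritten with an
/// inlined-at chain that points at the call site, or, when the callee has no
/// debug info or inline line tables are disabled, takes the call site's own
/// location.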
1389 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1390 Instruction *TheCall, bool CalleeHasDebugInfo) {
1391 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1392 if (!TheCallDL)
1393 return;
1394
1395 auto &Ctx = Fn->getContext();
1396 DILocation *InlinedAtNode = TheCallDL;
1397
1398 // Create a unique call site, not to be confused with any other call from the
1399 // same location.
1400 InlinedAtNode = DILocation::getDistinct(
1401 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1402 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1403
1404 // Cache the inlined-at nodes as they're built so they are reused; without
1405 // this, every instruction's inlined-at chain would become distinct from the
1406 // others.
1407 DenseMap<const MDNode *, MDNode *> IANodes;
1408
1409 // Check if we are not generating inline line tables and want to use
1410 // the call site location instead.
1411 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1412
1413 for (; FI != Fn->end(); ++FI) {
1414 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1415 BI != BE; ++BI) {
1416 // Loop metadata needs to be updated so that the start and end locs
1417 // reference inlined-at locations.
1418 if (MDNode *LoopID = BI->getMetadata(LLVMContext::MD_loop)) {
1419 MDNode *NewLoopID =
1420 inlineLoopID(LoopID, InlinedAtNode, BI->getContext(), IANodes);
1421 BI->setMetadata(LLVMContext::MD_loop, NewLoopID);
1422 }
1423
1424 if (!NoInlineLineTables)
1425 if (DebugLoc DL = BI->getDebugLoc()) {
1426 DebugLoc IDL =
1427 inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1428 BI->setDebugLoc(IDL);
1429 continue;
1430 }
1431
1432 if (CalleeHasDebugInfo && !NoInlineLineTables)
1433 continue;
1434
1435 // If the inlined instruction has no line number, or if inline info
1436 // is not being generated, make it look as if it originates from the call
1437 // location. This is important for __attribute__((always_inline, nodebug))
1438 // functions which must use caller location for all instructions in their
1439 // function body.
1440
1441 // Don't update static allocas, as they may get moved later.
1442 if (auto *AI = dyn_cast<AllocaInst>(BI))
1443 if (allocaWouldBeStaticInEntry(AI))
1444 continue;
1445
1446 BI->setDebugLoc(TheCallDL);
1447 }
1448
1449 // Remove debug info intrinsics if we're not keeping inline info.
1450 if (NoInlineLineTables) {
1451 BasicBlock::iterator BI = FI->begin();
1452 while (BI != FI->end()) {
1453 if (isa<DbgInfoIntrinsic>(BI)) {
1454 BI = BI->eraseFromParent();
1455 continue;
1456 }
1457 ++BI;
1458 }
1459 }
1460
1461 }
1462 }
1463
1464 /// Update the block frequencies of the caller after a callee has been inlined.
1465 ///
1466 /// Each block cloned into the caller has its block frequency scaled by the
1467 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1468 /// callee's entry block gets the same frequency as the callsite block and the
1469 /// relative frequencies of all cloned blocks remain the same after cloning.
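/// In other words, roughly:
///   Freq(Clone(BB)) = Freq(CallSiteBlock) * Freq(BB) / Freq(CalleeEntryBlock)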
1470 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1471 const ValueToValueMapTy &VMap,
1472 BlockFrequencyInfo *CallerBFI,
1473 BlockFrequencyInfo *CalleeBFI,
1474 const BasicBlock &CalleeEntryBlock) {
1475 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1476 for (auto Entry : VMap) {
1477 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1478 continue;
1479 auto *OrigBB = cast<BasicBlock>(Entry.first);
1480 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1481 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1482 if (!ClonedBBs.insert(ClonedBB).second) {
1483 // Multiple blocks in the callee might get mapped to one cloned block in
1484 // the caller since we prune the callee as we clone it. When that happens,
1485 // we want to use the maximum among the original blocks' frequencies.
1486 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1487 if (NewFreq > Freq)
1488 Freq = NewFreq;
1489 }
1490 CallerBFI->setBlockFreq(ClonedBB, Freq);
1491 }
1492 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1493 CallerBFI->setBlockFreqAndScale(
1494 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1495 ClonedBBs);
1496 }
1497
1498 /// Update the branch metadata for cloned call instructions.
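/// The estimated call site count is subtracted from the callee's entry count
/// (those executions now go through the inlined copy), and the branch weights
/// of the cloned calls are rescaled via updateProfileCallee.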
1499 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1500 const ProfileCount &CalleeEntryCount,
1501 const Instruction *TheCall,
1502 ProfileSummaryInfo *PSI,
1503 BlockFrequencyInfo *CallerBFI) {
1504 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1505 CalleeEntryCount.getCount() < 1)
1506 return;
1507 auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1508 int64_t CallCount =
1509 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1510 CalleeEntryCount.getCount());
1511 updateProfileCallee(Callee, -CallCount, &VMap);
1512 }
1513
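/// Adjust the callee's entry count by entryDelta and rescale the profile
/// weights of its remaining call sites to match. When a VMap is provided
/// (i.e. during inlining), the cloned call sites are scaled by the portion of
/// the count that moved to the clone.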
1514 void llvm::updateProfileCallee(
1515 Function *Callee, int64_t entryDelta,
1516 const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1517 auto CalleeCount = Callee->getEntryCount();
1518 if (!CalleeCount.hasValue())
1519 return;
1520
1521 uint64_t priorEntryCount = CalleeCount.getCount();
1522 uint64_t newEntryCount;
1523
1524 // Since CallSiteCount is an estimate, it could exceed the original callee
1525 // count; clamp the new entry count at 0 to guard against underflow.
1526 if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1527 newEntryCount = 0;
1528 else
1529 newEntryCount = priorEntryCount + entryDelta;
1530
1531 // Are we updating counts as part of inlining (i.e. was a VMap provided)?
1532 if (VMap) {
1533 uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1534 for (auto Entry : *VMap)
1535 if (isa<CallInst>(Entry.first))
1536 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1537 CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1538 }
1539
1540 if (entryDelta) {
1541 Callee->setEntryCount(newEntryCount);
1542
1543 for (BasicBlock &BB : *Callee)
1544 // No need to update the callsite if it is pruned during inlining.
1545 if (!VMap || VMap->count(&BB))
1546 for (Instruction &I : BB)
1547 if (CallInst *CI = dyn_cast<CallInst>(&I))
1548 CI->updateProfWeight(newEntryCount, priorEntryCount);
1549 }
1550 }
1551
1552 /// This function inlines the called function into the basic block of the
1553 /// caller. This returns false if it is not possible to inline this call.
1554 /// The program is still in a well-defined state if this occurs, though.
1555 ///
1556 /// Note that this only does one level of inlining. For example, if the
1557 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1558 /// exists in the instruction stream. Similarly this will inline a recursive
1559 /// function by one level.
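/// At a high level this clones the callee's blocks into the caller (pruning
/// as it goes), moves static allocas into the caller's entry block, inserts
/// lifetime markers and stack save/restore calls where needed, fixes up debug
/// locations, profile data and EH state, and finally merges the cloned return
/// blocks back into the caller's control flow.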
1560 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1561 AAResults *CalleeAAR,
1562 bool InsertLifetime,
1563 Function *ForwardVarArgsTo) {
1564 Instruction *TheCall = CS.getInstruction();
1565 assert(TheCall->getParent() && TheCall->getFunction()
1566 && "Instruction not in function!");
1567
1568 // FIXME: we don't inline callbr yet.
1569 if (isa<CallBrInst>(TheCall))
1570 return false;
1571
1572 // If IFI has any state in it, zap it before we fill it in.
1573 IFI.reset();
1574
1575 Function *CalledFunc = CS.getCalledFunction();
1576 if (!CalledFunc || // Can't inline external function or indirect
1577 CalledFunc->isDeclaration()) // call!
1578 return "external or indirect";
1579
1580 // The inliner does not know how to inline through calls with operand bundles
1581 // in general ...
1582 if (CS.hasOperandBundles()) {
1583 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1584 uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1585 // ... but it knows how to inline through "deopt" operand bundles ...
1586 if (Tag == LLVMContext::OB_deopt)
1587 continue;
1588 // ... and "funclet" operand bundles.
1589 if (Tag == LLVMContext::OB_funclet)
1590 continue;
1591
1592 return "unsupported operand bundle";
1593 }
1594 }
1595
1596 // If the call to the callee cannot throw, set the 'nounwind' flag on any
1597 // calls that we inline.
1598 bool MarkNoUnwind = CS.doesNotThrow();
1599
1600 BasicBlock *OrigBB = TheCall->getParent();
1601 Function *Caller = OrigBB->getParent();
1602
1603 // GC poses two hazards to inlining, which only occur when the callee has GC:
1604 // 1. If the caller has no GC, then the callee's GC must be propagated to the
1605 // caller.
1606 // 2. If the caller has a differing GC, it is invalid to inline.
1607 if (CalledFunc->hasGC()) {
1608 if (!Caller->hasGC())
1609 Caller->setGC(CalledFunc->getGC());
1610 else if (CalledFunc->getGC() != Caller->getGC())
1611 return "incompatible GC";
1612 }
1613
1614 // Get the personality function from the callee if it contains a landing pad.
1615 Constant *CalledPersonality =
1616 CalledFunc->hasPersonalityFn()
1617 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1618 : nullptr;
1619
1620 // Find the personality function used by the landing pads of the caller. If it
1621 // exists, then check to see that it matches the personality function used in
1622 // the callee.
1623 Constant *CallerPersonality =
1624 Caller->hasPersonalityFn()
1625 ? Caller->getPersonalityFn()->stripPointerCasts()
1626 : nullptr;
1627 if (CalledPersonality) {
1628 if (!CallerPersonality)
1629 Caller->setPersonalityFn(CalledPersonality);
1630 // If the personality functions match, then we can perform the
1631 // inlining. Otherwise, we can't inline.
1632 // TODO: This isn't 100% true. Some personality functions are proper
1633 // supersets of others and can be used in place of the other.
1634 else if (CalledPersonality != CallerPersonality)
1635 return "incompatible personality";
1636 }
1637
1638 // We need to figure out which funclet the callsite was in so that we may
1639 // properly nest the callee.
1640 Instruction *CallSiteEHPad = nullptr;
1641 if (CallerPersonality) {
1642 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1643 if (isScopedEHPersonality(Personality)) {
1644 Optional<OperandBundleUse> ParentFunclet =
1645 CS.getOperandBundle(LLVMContext::OB_funclet);
1646 if (ParentFunclet)
1647 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1648
1649 // OK, the inlining site is legal. What about the target function?
1650
1651 if (CallSiteEHPad) {
1652 if (Personality == EHPersonality::MSVC_CXX) {
1653 // The MSVC personality cannot tolerate catches getting inlined into
1654 // cleanup funclets.
1655 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1656 // Ok, the call site is within a cleanuppad. Let's check the callee
1657 // for catchpads.
1658 for (const BasicBlock &CalledBB : *CalledFunc) {
1659 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1660 return "catch in cleanup funclet";
1661 }
1662 }
1663 } else if (isAsynchronousEHPersonality(Personality)) {
1664 // SEH is even less tolerant; there may not be any sort of exceptional
1665 // funclet in the callee.
1666 for (const BasicBlock &CalledBB : *CalledFunc) {
1667 if (CalledBB.isEHPad())
1668 return "SEH in cleanup funclet";
1669 }
1670 }
1671 }
1672 }
1673 }
1674
1675 // Determine if we are dealing with a call in an EHPad which does not unwind
1676 // to caller.
1677 bool EHPadForCallUnwindsLocally = false;
1678 if (CallSiteEHPad && CS.isCall()) {
1679 UnwindDestMemoTy FuncletUnwindMap;
1680 Value *CallSiteUnwindDestToken =
1681 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1682
1683 EHPadForCallUnwindsLocally =
1684 CallSiteUnwindDestToken &&
1685 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1686 }
1687
1688 // Get an iterator to the last basic block in the function, which will have
1689 // the new function inlined after it.
1690 Function::iterator LastBlock = --Caller->end();
1691
1692 // Make sure to capture all of the return instructions from the cloned
1693 // function.
1694 SmallVector<ReturnInst*, 8> Returns;
1695 ClonedCodeInfo InlinedFunctionInfo;
1696 Function::iterator FirstNewBlock;
1697
1698 { // Scope to destroy VMap after cloning.
1699 ValueToValueMapTy VMap;
1700 // Keep a list of (dst, src) pairs to emit byval initializations.
1701 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1702
1703 auto &DL = Caller->getParent()->getDataLayout();
1704
1705 // Calculate the vector of arguments to pass into the function cloner, which
1706 // matches up the formal to the actual argument values.
1707 CallSite::arg_iterator AI = CS.arg_begin();
1708 unsigned ArgNo = 0;
1709 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1710 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1711 Value *ActualArg = *AI;
1712
1713 // When byval arguments are actually inlined, we need to make the copy
1714 // implied by them explicit. However, we don't do this if the callee is
1715 // readonly or readnone, because the copy would be unneeded: the callee
1716 // doesn't modify the struct.
1717 if (CS.isByValArgument(ArgNo)) {
1718 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1719 CalledFunc->getParamAlignment(ArgNo));
1720 if (ActualArg != *AI)
1721 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1722 }
1723
1724 VMap[&*I] = ActualArg;
1725 }
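// At this point VMap maps each of the callee's formal arguments to the value
// it takes at this call site, which lets the cloner below fold away code that
// becomes trivially dead or constant.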
1726
1727 // Add alignment assumptions if necessary. We do this before the inlined
1728 // instructions are actually cloned into the caller so that we can easily
1729 // check what will be known at the start of the inlined code.
1730 AddAlignmentAssumptions(CS, IFI);
1731
1732 // We want the inliner to prune the code as it copies. We would LOVE to
1733 // have no dead or constant instructions leftover after inlining occurs
1734 // (which can happen, e.g., because an argument was constant), but we'll be
1735 // happy with whatever the cloner can do.
1736 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1737 /*ModuleLevelChanges=*/false, Returns, ".i",
1738 &InlinedFunctionInfo, TheCall);
1739 // Remember the first block that is newly cloned over.
1740 FirstNewBlock = LastBlock; ++FirstNewBlock;
1741
1742 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1743 // Update the BFI of blocks cloned into the caller.
1744 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1745 CalledFunc->front());
1746
1747 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1748 IFI.PSI, IFI.CallerBFI);
1749
1750 // Inject byval arguments initialization.
1751 for (std::pair<Value*, Value*> &Init : ByValInit)
1752 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1753 &*FirstNewBlock, IFI);
1754
1755 Optional<OperandBundleUse> ParentDeopt =
1756 CS.getOperandBundle(LLVMContext::OB_deopt);
1757 if (ParentDeopt) {
1758 SmallVector<OperandBundleDef, 2> OpDefs;
1759
1760 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1761 Instruction *I = dyn_cast_or_null<Instruction>(VH);
1762 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef
1763
1764 OpDefs.clear();
1765
1766 CallSite ICS(I);
1767 OpDefs.reserve(ICS.getNumOperandBundles());
1768
1769 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1770 auto ChildOB = ICS.getOperandBundleAt(i);
1771 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1772 // If the inlined call has other operand bundles, let them be
1773 OpDefs.emplace_back(ChildOB);
1774 continue;
1775 }
1776
1777 // It may be useful to separate this logic (of handling operand
1778 // bundles) out to a separate "policy" component if this gets crowded.
1779 // Prepend the parent's deoptimization continuation to the newly
1780 // inlined call's deoptimization continuation.
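// For example, if the parent call site carries a "deopt" bundle with inputs
// [p0, p1] and the inlined call's "deopt" bundle has inputs [c0], the
// rewritten call gets "deopt" [p0, p1, c0].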
1781 std::vector<Value *> MergedDeoptArgs;
1782 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1783 ChildOB.Inputs.size());
1784
1785 MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1786 ParentDeopt->Inputs.begin(),
1787 ParentDeopt->Inputs.end());
1788 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1789 ChildOB.Inputs.end());
1790
1791 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1792 }
1793
1794 Instruction *NewI = nullptr;
1795 if (isa<CallInst>(I))
1796 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1797 else if (isa<CallBrInst>(I))
1798 NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
1799 else
1800 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1801
1802 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1803 // this even if the call returns void.
1804 I->replaceAllUsesWith(NewI);
1805
1806 VH = nullptr;
1807 I->eraseFromParent();
1808 }
1809 }
1810
1811 // Update the callgraph if requested.
1812 if (IFI.CG)
1813 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1814
1815 // For 'nodebug' functions, the associated DISubprogram is always null.
1816 // Conservatively avoid propagating the callsite debug location to
1817 // instructions inlined from a function whose DISubprogram is not null.
1818 fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1819 CalledFunc->getSubprogram() != nullptr);
1820
1821 // Clone existing noalias metadata if necessary.
1822 CloneAliasScopeMetadata(CS, VMap);
1823
1824 // Add noalias metadata if necessary.
1825 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1826
1827 // Propagate llvm.mem.parallel_loop_access if necessary.
1828 PropagateParallelLoopAccessMetadata(CS, VMap);
1829
1830 // Register any cloned assumptions.
1831 if (IFI.GetAssumptionCache)
1832 for (BasicBlock &NewBlock :
1833 make_range(FirstNewBlock->getIterator(), Caller->end()))
1834 for (Instruction &I : NewBlock) {
1835 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1836 if (II->getIntrinsicID() == Intrinsic::assume)
1837 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1838 }
1839 }
1840
1841 // If there are any alloca instructions in the block that used to be the entry
1842 // block for the callee, move them to the entry block of the caller. First
1843 // calculate which instruction they should be inserted before. We insert the
1844 // instructions at the end of the current alloca list.
1845 {
1846 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1847 for (BasicBlock::iterator I = FirstNewBlock->begin(),
1848 E = FirstNewBlock->end(); I != E; ) {
1849 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1850 if (!AI) continue;
1851
1852 // If the alloca is now dead, remove it. This often occurs due to code
1853 // specialization.
1854 if (AI->use_empty()) {
1855 AI->eraseFromParent();
1856 continue;
1857 }
1858
1859 if (!allocaWouldBeStaticInEntry(AI))
1860 continue;
1861
1862 // Keep track of the static allocas that we inline into the caller.
1863 IFI.StaticAllocas.push_back(AI);
1864
1865 // Scan for the block of allocas that we can move over, and move them
1866 // all at once.
1867 while (isa<AllocaInst>(I) &&
1868 !cast<AllocaInst>(I)->use_empty() &&
1869 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1870 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1871 ++I;
1872 }
1873
1874 // Transfer all of the allocas over in a block. Using splice means
1875 // that the instructions aren't removed from the symbol table, then
1876 // reinserted.
1877 Caller->getEntryBlock().getInstList().splice(
1878 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1879 }
1880 // Move any dbg.declares describing the allocas into the entry basic block.
1881 DIBuilder DIB(*Caller->getParent());
1882 for (auto &AI : IFI.StaticAllocas)
1883 replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::ApplyOffset, 0);
1884 }
1885
1886 SmallVector<Value*,4> VarArgsToForward;
1887 SmallVector<AttributeSet, 4> VarArgsAttrs;
1888 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1889 i < CS.getNumArgOperands(); i++) {
1890 VarArgsToForward.push_back(CS.getArgOperand(i));
1891 VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
1892 }
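// These are the actual arguments passed at the call site beyond the callee's
// fixed parameters; they may need to be appended to musttail calls or to
// calls to ForwardVarArgsTo in the loop below.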
1893
1894 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1895 if (InlinedFunctionInfo.ContainsCalls) {
1896 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1897 if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1898 CallSiteTailKind = CI->getTailCallKind();
1899
1900 // For inlining purposes, the "notail" marker is the same as no marker.
1901 if (CallSiteTailKind == CallInst::TCK_NoTail)
1902 CallSiteTailKind = CallInst::TCK_None;
1903
1904 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1905 ++BB) {
1906 for (auto II = BB->begin(); II != BB->end();) {
1907 Instruction &I = *II++;
1908 CallInst *CI = dyn_cast<CallInst>(&I);
1909 if (!CI)
1910 continue;
1911
1912 // Forward varargs from inlined call site to calls to the
1913 // ForwardVarArgsTo function, if requested, and to musttail calls.
1914 if (!VarArgsToForward.empty() &&
1915 ((ForwardVarArgsTo &&
1916 CI->getCalledFunction() == ForwardVarArgsTo) ||
1917 CI->isMustTailCall())) {
1918 // Collect attributes for non-vararg parameters.
1919 AttributeList Attrs = CI->getAttributes();
1920 SmallVector<AttributeSet, 8> ArgAttrs;
1921 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1922 for (unsigned ArgNo = 0;
1923 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1924 ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1925 }
1926
1927 // Add VarArg attributes.
1928 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1929 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1930 Attrs.getRetAttributes(), ArgAttrs);
1931 // Add VarArgs to existing parameters.
1932 SmallVector<Value *, 6> Params(CI->arg_operands());
1933 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
1934 CallInst *NewCI = CallInst::Create(
1935 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
1936 NewCI->setDebugLoc(CI->getDebugLoc());
1937 NewCI->setAttributes(Attrs);
1938 NewCI->setCallingConv(CI->getCallingConv());
1939 CI->replaceAllUsesWith(NewCI);
1940 CI->eraseFromParent();
1941 CI = NewCI;
1942 }
1943
1944 if (Function *F = CI->getCalledFunction())
1945 InlinedDeoptimizeCalls |=
1946 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1947
1948 // We need to reduce the strength of any inlined tail calls. For
1949 // musttail, we have to avoid introducing potential unbounded stack
1950 // growth. For example, if functions 'f' and 'g' are mutually recursive
1951 // with musttail, we can inline 'g' into 'f' so long as we preserve
1952 // musttail on the cloned call to 'f'. If either the inlined call site
1953 // or the cloned call site is *not* musttail, the program already has
1954 // one frame of stack growth, so it's safe to remove musttail. Here is
1955 // a table of example transformations:
1956 //
1957 // f -> musttail g -> musttail f ==> f -> musttail f
1958 // f -> musttail g -> tail f ==> f -> tail f
1959 // f -> g -> musttail f ==> f -> f
1960 // f -> g -> tail f ==> f -> f
1961 //
1962 // Inlined notail calls should remain notail calls.
1963 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1964 if (ChildTCK != CallInst::TCK_NoTail)
1965 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1966 CI->setTailCallKind(ChildTCK);
1967 InlinedMustTailCalls |= CI->isMustTailCall();
1968
1969 // Calls inlined through a 'nounwind' call site should be marked
1970 // 'nounwind'.
1971 if (MarkNoUnwind)
1972 CI->setDoesNotThrow();
1973 }
1974 }
1975 }
1976
1977 // Leave lifetime markers for the static allocas, scoping them to the
1978 // function we just inlined.
1979 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1980 IRBuilder<> builder(&FirstNewBlock->front());
1981 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1982 AllocaInst *AI = IFI.StaticAllocas[ai];
1983 // Don't mark swifterror allocas. They can't have bitcast uses.
1984 if (AI->isSwiftError())
1985 continue;
1986
1987 // If the alloca is already scoped to something smaller than the whole
1988 // function then there's no need to add redundant, less accurate markers.
1989 if (hasLifetimeMarkers(AI))
1990 continue;
1991
1992 // Try to determine the size of the allocation.
1993 ConstantInt *AllocaSize = nullptr;
1994 if (ConstantInt *AIArraySize =
1995 dyn_cast<ConstantInt>(AI->getArraySize())) {
1996 auto &DL = Caller->getParent()->getDataLayout();
1997 Type *AllocaType = AI->getAllocatedType();
1998 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1999 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2000
2001 // Don't add markers for zero-sized allocas.
2002 if (AllocaArraySize == 0)
2003 continue;
2004
2005 // Check that array size doesn't saturate uint64_t and doesn't
2006 // overflow when it's multiplied by type size.
2007 if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2008 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2009 AllocaTypeSize) {
2010 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2011 AllocaArraySize * AllocaTypeSize);
2012 }
2013 }
2014
2015 builder.CreateLifetimeStart(AI, AllocaSize);
2016 for (ReturnInst *RI : Returns) {
2017 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2018 // call and a return. The return kills all local allocas.
2019 if (InlinedMustTailCalls &&
2020 RI->getParent()->getTerminatingMustTailCall())
2021 continue;
2022 if (InlinedDeoptimizeCalls &&
2023 RI->getParent()->getTerminatingDeoptimizeCall())
2024 continue;
2025 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2026 }
2027 }
2028 }
2029
2030 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2031 // code with llvm.stacksave/llvm.stackrestore intrinsics.
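// Otherwise a dynamic alloca from the inlinee would stay live until the
// caller returns; the save/restore pair recreates the deallocation that used
// to happen when the callee returned.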
2032 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2033 Module *M = Caller->getParent();
2034 // Get the two intrinsics we care about.
2035 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2036 Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2037
2038 // Insert the llvm.stacksave.
2039 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2040 .CreateCall(StackSave, {}, "savedstack");
2041
2042 // Insert a call to llvm.stackrestore before any return instructions in the
2043 // inlined function.
2044 for (ReturnInst *RI : Returns) {
2045 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2046 // call and a return. The return will restore the stack pointer.
2047 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2048 continue;
2049 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2050 continue;
2051 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2052 }
2053 }
2054
2055 // If we are inlining for an invoke instruction, we must make sure to rewrite
2056 // any call instructions into invoke instructions. This is sensitive to which
2057 // funclet pads were top-level in the inlinee, so must be done before
2058 // rewriting the "parent pad" links.
2059 if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
2060 BasicBlock *UnwindDest = II->getUnwindDest();
2061 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2062 if (isa<LandingPadInst>(FirstNonPHI)) {
2063 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2064 } else {
2065 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2066 }
2067 }
2068
2069 // Update the lexical scopes of the new funclets and callsites.
2070 // Anything that had 'none' as its parent is now nested inside the callsite's
2071 // EHPad.
2072
2073 if (CallSiteEHPad) {
2074 for (Function::iterator BB = FirstNewBlock->getIterator(),
2075 E = Caller->end();
2076 BB != E; ++BB) {
2077 // Add bundle operands to any top-level call sites.
2078 SmallVector<OperandBundleDef, 1> OpBundles;
2079 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2080 Instruction *I = &*BBI++;
2081 CallSite CS(I);
2082 if (!CS)
2083 continue;
2084
2085 // Skip call sites which are nounwind intrinsics.
2086 auto *CalledFn =
2087 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2088 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2089 continue;
2090
2091 // Skip call sites which already have a "funclet" bundle.
2092 if (CS.getOperandBundle(LLVMContext::OB_funclet))
2093 continue;
2094
2095 CS.getOperandBundlesAsDefs(OpBundles);
2096 OpBundles.emplace_back("funclet", CallSiteEHPad);
2097
2098 Instruction *NewInst;
2099 if (CS.isCall())
2100 NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2101 else if (CS.isCallBr())
2102 NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
2103 else
2104 NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2105 NewInst->takeName(I);
2106 I->replaceAllUsesWith(NewInst);
2107 I->eraseFromParent();
2108
2109 OpBundles.clear();
2110 }
2111
2112 // It is problematic if the inlinee has a cleanupret which unwinds to the
2113 // caller and we inline it into a call site which doesn't itself unwind but
2114 // sits inside an EH pad that unwinds locally. Such an edge must be
2115 // dynamically unreachable, so we replace the cleanupret with unreachable.
2116 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2117 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2118 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2119
2120 Instruction *I = BB->getFirstNonPHI();
2121 if (!I->isEHPad())
2122 continue;
2123
2124 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2125 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2126 CatchSwitch->setParentPad(CallSiteEHPad);
2127 } else {
2128 auto *FPI = cast<FuncletPadInst>(I);
2129 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2130 FPI->setParentPad(CallSiteEHPad);
2131 }
2132 }
2133 }
2134
2135 if (InlinedDeoptimizeCalls) {
2136 // We need to at least remove the deoptimizing returns from the Return set,
2137 // so that the control flow from those returns does not get merged into the
2138 // caller (but terminates it instead). If the caller's return type does not
2139 // match the callee's return type, we also need to change the return type of
2140 // the intrinsic.
2141 if (Caller->getReturnType() == TheCall->getType()) {
2142 auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2143 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2144 });
2145 Returns.erase(NewEnd, Returns.end());
2146 } else {
2147 SmallVector<ReturnInst *, 8> NormalReturns;
2148 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2149 Caller->getParent(), Intrinsic::experimental_deoptimize,
2150 {Caller->getReturnType()});
2151
2152 for (ReturnInst *RI : Returns) {
2153 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2154 if (!DeoptCall) {
2155 NormalReturns.push_back(RI);
2156 continue;
2157 }
2158
2159 // The calling convention on the deoptimize call itself may be bogus,
2160 // since the code we're inlining may have undefined behavior (and may
2161 // never actually execute at runtime); but all
2162 // @llvm.experimental.deoptimize declarations have to have the same
2163 // calling convention in a well-formed module.
2164 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2165 NewDeoptIntrinsic->setCallingConv(CallingConv);
2166 auto *CurBB = RI->getParent();
2167 RI->eraseFromParent();
2168
2169 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2170 DeoptCall->arg_end());
2171
2172 SmallVector<OperandBundleDef, 1> OpBundles;
2173 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2174 DeoptCall->eraseFromParent();
2175 assert(!OpBundles.empty() &&
2176 "Expected at least the deopt operand bundle");
2177
2178 IRBuilder<> Builder(CurBB);
2179 CallInst *NewDeoptCall =
2180 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2181 NewDeoptCall->setCallingConv(CallingConv);
2182 if (NewDeoptCall->getType()->isVoidTy())
2183 Builder.CreateRetVoid();
2184 else
2185 Builder.CreateRet(NewDeoptCall);
2186 }
2187
2188 // Leave behind the normal returns so we can merge control flow.
2189 std::swap(Returns, NormalReturns);
2190 }
2191 }
2192
2193 // Handle any inlined musttail call sites. In order for a new call site to be
2194 // musttail, the source of the clone and the inlined call site must have been
2195 // musttail. Therefore it's safe to return without merging control into the
2196 // phi below.
2197 if (InlinedMustTailCalls) {
2198 // Check if we need to bitcast the result of any musttail calls.
2199 Type *NewRetTy = Caller->getReturnType();
2200 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2201
2202 // Handle the returns preceded by musttail calls separately.
2203 SmallVector<ReturnInst *, 8> NormalReturns;
2204 for (ReturnInst *RI : Returns) {
2205 CallInst *ReturnedMustTail =
2206 RI->getParent()->getTerminatingMustTailCall();
2207 if (!ReturnedMustTail) {
2208 NormalReturns.push_back(RI);
2209 continue;
2210 }
2211 if (!NeedBitCast)
2212 continue;
2213
2214 // Delete the old return and any preceding bitcast.
2215 BasicBlock *CurBB = RI->getParent();
2216 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2217 RI->eraseFromParent();
2218 if (OldCast)
2219 OldCast->eraseFromParent();
2220
2221 // Insert a new bitcast and return with the right type.
2222 IRBuilder<> Builder(CurBB);
2223 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2224 }
2225
2226 // Leave behind the normal returns so we can merge control flow.
2227 std::swap(Returns, NormalReturns);
2228 }
2229
2230 // Now that all of the transforms on the inlined code have taken place but
2231 // before we splice the inlined code into the CFG and lose track of which
2232 // blocks were actually inlined, collect the call sites. We only do this if
2233 // call graph updates weren't requested, as those provide value handle based
2234 // tracking of inlined call sites instead.
2235 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2236 // Otherwise just collect the raw call sites that were inlined.
2237 for (BasicBlock &NewBB :
2238 make_range(FirstNewBlock->getIterator(), Caller->end()))
2239 for (Instruction &I : NewBB)
2240 if (auto CS = CallSite(&I))
2241 IFI.InlinedCallSites.push_back(CS);
2242 }
2243
2244 // If we cloned in _exactly one_ basic block, and if that block ends in a
2245 // return instruction, we splice the body of the inlined callee directly into
2246 // the calling basic block.
2247 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2248 // Move all of the instructions right before the call.
2249 OrigBB->getInstList().splice(TheCall->getIterator(),
2250 FirstNewBlock->getInstList(),
2251 FirstNewBlock->begin(), FirstNewBlock->end());
2252 // Remove the cloned basic block.
2253 Caller->getBasicBlockList().pop_back();
2254
2255 // If the call site was an invoke instruction, add a branch to the normal
2256 // destination.
2257 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2258 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2259 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2260 }
2261
2262 // If the return instruction returned a value, replace uses of the call with
2263 // uses of the returned value.
2264 if (!TheCall->use_empty()) {
2265 ReturnInst *R = Returns[0];
2266 if (TheCall == R->getReturnValue())
2267 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2268 else
2269 TheCall->replaceAllUsesWith(R->getReturnValue());
2270 }
2271 // Since we are now done with the Call/Invoke, we can delete it.
2272 TheCall->eraseFromParent();
2273
2274 // Since we are now done with the return instruction, delete it also.
2275 Returns[0]->eraseFromParent();
2276
2277 // We are now done with the inlining.
2278 return true;
2279 }
2280
2281 // Otherwise, we have the normal case, of more than one block to inline or
2282 // multiple return sites.
2283
2284 // We want to clone the entire callee function into the hole between the
2285 // "starter" and "ender" blocks. How we accomplish this depends on whether
2286 // this is an invoke instruction or a call instruction.
2287 BasicBlock *AfterCallBB;
2288 BranchInst *CreatedBranchToNormalDest = nullptr;
2289 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2290
2291 // Add an unconditional branch to make this look like the CallInst case...
2292 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2293
2294 // Split the basic block. This guarantees that no PHI nodes will have to be
2295 // updated due to new incoming edges, and makes the invoke case more
2296 // symmetric to the call case.
2297 AfterCallBB =
2298 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2299 CalledFunc->getName() + ".exit");
2300
2301 } else { // It's a call
2302 // If this is a call instruction, we need to split the basic block that
2303 // the call lives in.
2304 //
2305 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2306 CalledFunc->getName() + ".exit");
2307 }
2308
2309 if (IFI.CallerBFI) {
2310 // Copy original BB's block frequency to AfterCallBB
2311 IFI.CallerBFI->setBlockFreq(
2312 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2313 }
2314
2315 // Change the branch that used to go to AfterCallBB to branch to the first
2316 // basic block of the inlined function.
2317 //
2318 Instruction *Br = OrigBB->getTerminator();
2319 assert(Br && Br->getOpcode() == Instruction::Br &&
2320 "splitBasicBlock broken!");
2321 Br->setOperand(0, &*FirstNewBlock);
2322
2323 // Now that the function is correct, make it a little bit nicer. In
2324 // particular, move the basic blocks inserted from the end of the function
2325 // into the space made by splitting the source basic block.
2326 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2327 Caller->getBasicBlockList(), FirstNewBlock,
2328 Caller->end());
2329
2330 // Handle all of the return instructions that we just cloned in, and eliminate
2331 // any users of the original call/invoke instruction.
2332 Type *RTy = CalledFunc->getReturnType();
2333
2334 PHINode *PHI = nullptr;
2335 if (Returns.size() > 1) {
2336 // The PHI node should go at the front of the new basic block to merge all
2337 // possible incoming values.
2338 if (!TheCall->use_empty()) {
2339 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2340 &AfterCallBB->front());
2341 // Anything that used the result of the function call should now use the
2342 // PHI node as their operand.
2343 TheCall->replaceAllUsesWith(PHI);
2344 }
2345
2346 // Loop over all of the return instructions adding entries to the PHI node
2347 // as appropriate.
2348 if (PHI) {
2349 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2350 ReturnInst *RI = Returns[i];
2351 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2352 "Ret value not consistent in function!");
2353 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2354 }
2355 }
2356
2357 // Add a branch to the merge points and remove return instructions.
2358 DebugLoc Loc;
2359 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2360 ReturnInst *RI = Returns[i];
2361 BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2362 Loc = RI->getDebugLoc();
2363 BI->setDebugLoc(Loc);
2364 RI->eraseFromParent();
2365 }
2366 // We need to set the debug location to *somewhere* inside the
2367 // inlined function. The line number may be nonsensical, but the
2368 // instruction will at least be associated with the right
2369 // function.
2370 if (CreatedBranchToNormalDest)
2371 CreatedBranchToNormalDest->setDebugLoc(Loc);
2372 } else if (!Returns.empty()) {
2373 // Otherwise, if there is exactly one return value, just replace anything
2374 // using the return value of the call with the computed value.
2375 if (!TheCall->use_empty()) {
2376 if (TheCall == Returns[0]->getReturnValue())
2377 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2378 else
2379 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2380 }
2381
2382 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2383 BasicBlock *ReturnBB = Returns[0]->getParent();
2384 ReturnBB->replaceAllUsesWith(AfterCallBB);
2385
2386 // Splice the code from the return block into the block that it will return
2387 // to, which contains the code that was after the call.
2388 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2389 ReturnBB->getInstList());
2390
2391 if (CreatedBranchToNormalDest)
2392 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2393
2394 // Delete the return instruction and the now-empty ReturnBB.
2395 Returns[0]->eraseFromParent();
2396 ReturnBB->eraseFromParent();
2397 } else if (!TheCall->use_empty()) {
2398 // No returns, but something is using the return value of the call. Just
2399 // nuke the result.
2400 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2401 }
2402
2403 // Since we are now done with the Call/Invoke, we can delete it.
2404 TheCall->eraseFromParent();
2405
2406 // If we inlined any musttail calls and the original return is now
2407 // unreachable, delete it. It can only contain a bitcast and ret.
2408 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2409 AfterCallBB->eraseFromParent();
2410
2411 // We should always be able to fold the entry block of the function into the
2412 // single predecessor of the block...
2413 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2414 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2415
2416 // Splice the code entry block into calling block, right before the
2417 // unconditional branch.
2418 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2419 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2420
2421 // Remove the unconditional branch.
2422 OrigBB->getInstList().erase(Br);
2423
2424 // Now we can remove the CalleeEntry block, which is now empty.
2425 Caller->getBasicBlockList().erase(CalleeEntry);
2426
2427 // If we inserted a phi node, check to see if it has a single value (e.g. all
2428 // the entries are the same or undef). If so, remove the PHI so it doesn't
2429 // block other optimizations.
2430 if (PHI) {
2431 AssumptionCache *AC =
2432 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2433 auto &DL = Caller->getParent()->getDataLayout();
2434 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2435 PHI->replaceAllUsesWith(V);
2436 PHI->eraseFromParent();
2437 }
2438 }
2439
2440 return true;
2441 }
2442