//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
// The code in this file for handling inlines through invoke
// instructions preserves semantics only under some assumptions about
// the behavior of unwinders which correspond to gcc-style libUnwind
// exception personality functions. Eventually the IR will be
// improved to make this unnecessary, but until then, this code is
// marked [LIBUNWIND].
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}

// FIXME: New EH - Remove the functions marked [LIBUNWIND] when new EH is
// turned on.

/// [LIBUNWIND] Look for an llvm.eh.exception call in the given block.
static EHExceptionInst *findExceptionInBlock(BasicBlock *bb) {
  for (BasicBlock::iterator i = bb->begin(), e = bb->end(); i != e; ++i) {
    EHExceptionInst *exn = dyn_cast<EHExceptionInst>(i);
    if (exn) return exn;
  }

  return 0;
}

/// [LIBUNWIND] Look for the 'best' llvm.eh.selector instruction for
/// the given llvm.eh.exception call.
static EHSelectorInst *findSelectorForException(EHExceptionInst *exn) {
  BasicBlock *exnBlock = exn->getParent();

  EHSelectorInst *outOfBlockSelector = 0;
  for (Instruction::use_iterator
         ui = exn->use_begin(), ue = exn->use_end(); ui != ue; ++ui) {
    EHSelectorInst *sel = dyn_cast<EHSelectorInst>(*ui);
    if (!sel) continue;

    // Immediately accept an eh.selector in the same block as the
    // exception call.
    if (sel->getParent() == exnBlock) return sel;

    // Otherwise, use the first selector we see.
    if (!outOfBlockSelector) outOfBlockSelector = sel;
  }

  return outOfBlockSelector;
}

/// [LIBUNWIND] Find the (possibly absent) call to @llvm.eh.selector
/// in the given landing pad. In principle, llvm.eh.exception is
/// required to be in the landing pad; in practice, SplitCriticalEdge
/// can break that invariant, and then inlining can break it further.
/// There's a real need for a reliable solution here, but until that
/// happens, we use some fragile workarounds.
static EHSelectorInst *findSelectorForLandingPad(BasicBlock *lpad) {
  // Look for an exception call in the actual landing pad.
  EHExceptionInst *exn = findExceptionInBlock(lpad);
  if (exn) return findSelectorForException(exn);

  // Okay, if that failed, look for one in an obvious successor. If
  // we find one, we'll fix the IR by moving things back to the
  // landing pad.

  bool dominates = true; // does the lpad dominate the exn call
  BasicBlock *nonDominated = 0; // if not, the first non-dominated block
  BasicBlock *lastDominated = 0; // and the block which branched to it

  BasicBlock *exnBlock = lpad;

  // We need to protect against lpads that lead into infinite loops.
  SmallPtrSet<BasicBlock*,4> visited;
  visited.insert(exnBlock);

  do {
    // We're not going to apply this hack to anything more complicated
    // than a series of unconditional branches, so if the block
    // doesn't terminate in an unconditional branch, just fail. More
    // complicated cases can arise when, say, sinking a call into a
    // split unwind edge and then inlining it; but that can do almost
    // *anything* to the CFG, including leaving the selector
    // completely unreachable. The only way to fix that properly is
    // to (1) prohibit transforms which move the exception or selector
    // values away from the landing pad, e.g. by producing them with
    // instructions that are pinned to an edge like a phi, or
    // producing them with not-really-instructions, and (2) making
    // transforms which split edges deal with that.
    BranchInst *branch = dyn_cast<BranchInst>(&exnBlock->back());
    if (!branch || branch->isConditional()) return 0;

    BasicBlock *successor = branch->getSuccessor(0);

    // Fail if we found an infinite loop.
    if (!visited.insert(successor)) return 0;

    // If the successor isn't dominated by exnBlock:
    if (!successor->getSinglePredecessor()) {
      // We don't want to have to deal with threading the exception
      // through multiple levels of phi, so give up if we've already
      // followed a non-dominating edge.
      if (!dominates) return 0;

      // Otherwise, remember this as a non-dominating edge.
      dominates = false;
      nonDominated = successor;
      lastDominated = exnBlock;
    }

    exnBlock = successor;

    // Can we stop here?
    exn = findExceptionInBlock(exnBlock);
  } while (!exn);

  // Look for a selector call for the exception we found.
  EHSelectorInst *selector = findSelectorForException(exn);
  if (!selector) return 0;

  // The easy case is when the landing pad still dominates the
  // exception call, in which case we can just move both calls back to
  // the landing pad.
  if (dominates) {
    selector->moveBefore(lpad->getFirstNonPHI());
    exn->moveBefore(selector);
    return selector;
  }

  // Otherwise, we have to split at the first non-dominating block.
  // The CFG looks basically like this:
  //    lpad:
  //      phis_0
  //      insnsAndBranches_1
  //      br label %nonDominated
  //    nonDominated:
  //      phis_2
  //      insns_3
  //      %exn = call i8* @llvm.eh.exception()
  //      insnsAndBranches_4
  //      %selector = call @llvm.eh.selector(i8* %exn, ...
  // We need to turn this into:
  //    lpad:
  //      phis_0
  //      %exn0 = call i8* @llvm.eh.exception()
  //      %selector0 = call @llvm.eh.selector(i8* %exn0, ...
  //      insnsAndBranches_1
  //      br label %split // from lastDominated
  //    nonDominated:
  //      phis_2 (without edge from lastDominated)
  //      %exn1 = call i8* @llvm.eh.exception()
  //      %selector1 = call i8* @llvm.eh.selector(i8* %exn1, ...
  //      br label %split
  //    split:
  //      phis_2 (edge from lastDominated, edge from split)
  //      %exn = phi ...
  //      %selector = phi ...
  //      insns_3
  //      insnsAndBranches_4

  assert(nonDominated);
  assert(lastDominated);

  // First, make clones of the intrinsics to go in lpad.
  EHExceptionInst *lpadExn = cast<EHExceptionInst>(exn->clone());
  EHSelectorInst *lpadSelector = cast<EHSelectorInst>(selector->clone());
  lpadSelector->setArgOperand(0, lpadExn);
  lpadSelector->insertBefore(lpad->getFirstNonPHI());
  lpadExn->insertBefore(lpadSelector);

  // Split the non-dominated block.
  BasicBlock *split =
    nonDominated->splitBasicBlock(nonDominated->getFirstNonPHI(),
                                  nonDominated->getName() + ".lpad-fix");

  // Redirect the last dominated branch there.
  cast<BranchInst>(lastDominated->back()).setSuccessor(0, split);

  // Move the existing intrinsics to the end of the old block.
  selector->moveBefore(&nonDominated->back());
  exn->moveBefore(selector);

  Instruction *splitIP = &split->front();

  // For all the phis in nonDominated, make a new phi in split to join
  // that phi with the edge from lastDominated.
  for (BasicBlock::iterator
         i = nonDominated->begin(), e = nonDominated->end(); i != e; ++i) {
    PHINode *phi = dyn_cast<PHINode>(i);
    if (!phi) break;

    PHINode *splitPhi = PHINode::Create(phi->getType(), 2, phi->getName(),
                                        splitIP);
    phi->replaceAllUsesWith(splitPhi);
    splitPhi->addIncoming(phi, nonDominated);
    splitPhi->addIncoming(phi->removeIncomingValue(lastDominated),
                          lastDominated);
  }

  // Make new phis for the exception and selector.
  PHINode *exnPhi = PHINode::Create(exn->getType(), 2, "", splitIP);
  exn->replaceAllUsesWith(exnPhi);
  selector->setArgOperand(0, exn); // except for this use
  exnPhi->addIncoming(exn, nonDominated);
  exnPhi->addIncoming(lpadExn, lastDominated);

  PHINode *selectorPhi = PHINode::Create(selector->getType(), 2, "", splitIP);
  selector->replaceAllUsesWith(selectorPhi);
  selectorPhi->addIncoming(selector, nonDominated);
  selectorPhi->addIncoming(lpadSelector, lastDominated);

  return lpadSelector;
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterUnwindDest;
    EHSelectorInst *OuterSelector;
    BasicBlock *InnerUnwindDest;
    PHINode *InnerExceptionPHI;
    PHINode *InnerSelectorPHI;
    SmallVector<Value*, 8> UnwindDestPHIValues;

    // FIXME: New EH - These will replace the analogous ones above.
    BasicBlock *OuterResumeDest; //< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; //< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  //< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   //< PHI for EH values from landingpad insts.

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterUnwindDest(II->getUnwindDest()), OuterSelector(0),
        InnerUnwindDest(0), InnerExceptionPHI(0), InnerSelectorPHI(0),
        OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
        CallerLPad(0), InnerEHValuesPHI(0) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterUnwindDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      // FIXME: With the new EH, this if/dyn_cast should be a 'cast'.
      if (LandingPadInst *LPI = dyn_cast<LandingPadInst>(I)) {
        CallerLPad = LPI;
      }
    }

    /// The outer unwind destination is the target of unwind edges
    /// introduced for calls within the inlined function.
    BasicBlock *getOuterUnwindDest() const {
      return OuterUnwindDest;
    }

    EHSelectorInst *getOuterSelector() {
      if (!OuterSelector)
        OuterSelector = findSelectorForLandingPad(OuterUnwindDest);
      return OuterSelector;
    }

    BasicBlock *getInnerUnwindDest();

    // FIXME: New EH - Rename when new EH is turned on.
    BasicBlock *getInnerUnwindDestNewEH();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    bool forwardEHResume(CallInst *call, BasicBlock *src);

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterUnwindDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// [LIBUNWIND] Get or create a target for the branch out of rewritten calls to
/// llvm.eh.resume.
BasicBlock *InvokeInliningInfo::getInnerUnwindDest() {
  if (InnerUnwindDest) return InnerUnwindDest;

  // Find and hoist the llvm.eh.exception and llvm.eh.selector calls
  // in the outer landing pad to immediately following the phis.
  EHSelectorInst *selector = getOuterSelector();
  if (!selector) return 0;

  // The call to llvm.eh.exception *must* be in the landing pad.
  Instruction *exn = cast<Instruction>(selector->getArgOperand(0));
  assert(exn->getParent() == OuterUnwindDest);

  // TODO: recognize when we've already done this, so that we don't
  // get a linear number of these when inlining calls into lots of
  // invokes with the same landing pad.

  // Do the hoisting.
  Instruction *splitPoint = exn->getParent()->getFirstNonPHI();
  assert(splitPoint != selector && "selector-on-exception dominance broken!");
  if (splitPoint == exn) {
    selector->removeFromParent();
    selector->insertAfter(exn);
    splitPoint = selector->getNextNode();
  } else {
    exn->moveBefore(splitPoint);
    selector->moveBefore(splitPoint);
  }

  // Split the landing pad.
  InnerUnwindDest = OuterUnwindDest->splitBasicBlock(splitPoint,
                                        OuterUnwindDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned phiCapacity = 2;

  // Create corresponding new phis for all the phis in the outer landing pad.
  BasicBlock::iterator insertPoint = InnerUnwindDest->begin();
  BasicBlock::iterator I = OuterUnwindDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *outerPhi = cast<PHINode>(I);
    PHINode *innerPhi = PHINode::Create(outerPhi->getType(), phiCapacity,
                                        outerPhi->getName() + ".lpad-body",
                                        insertPoint);
    outerPhi->replaceAllUsesWith(innerPhi);
    innerPhi->addIncoming(outerPhi, OuterUnwindDest);
  }

  // Create a phi for the exception value...
  InnerExceptionPHI = PHINode::Create(exn->getType(), phiCapacity,
                                      "exn.lpad-body", insertPoint);
  exn->replaceAllUsesWith(InnerExceptionPHI);
  selector->setArgOperand(0, exn); // restore this use
  InnerExceptionPHI->addIncoming(exn, OuterUnwindDest);

  // ...and the selector.
  InnerSelectorPHI = PHINode::Create(selector->getType(), phiCapacity,
                                     "selector.lpad-body", insertPoint);
  selector->replaceAllUsesWith(InnerSelectorPHI);
  InnerSelectorPHI->addIncoming(selector, OuterUnwindDest);

  // All done.
  return InnerUnwindDest;
}

/// [LIBUNWIND] Try to forward the given call, which logically occurs
/// at the end of the given block, as a branch to the inner unwind
/// block. Returns true if the call was forwarded.
bool InvokeInliningInfo::forwardEHResume(CallInst *call, BasicBlock *src) {
  // First, check whether this is a call to the intrinsic.
  Function *fn = dyn_cast<Function>(call->getCalledValue());
  if (!fn || fn->getName() != "llvm.eh.resume")
    return false;

  // At this point, we need to return true on all paths, because
  // otherwise we'll construct an invoke of the intrinsic, which is
  // not well-formed.

  // Try to find or make an inner unwind dest, which will fail if we
  // can't find a selector call for the outer unwind dest.
  BasicBlock *dest = getInnerUnwindDest();
  bool hasSelector = (dest != 0);

  // If we failed, just use the outer unwind dest, dropping the
  // exception and selector on the floor.
  if (!hasSelector)
    dest = OuterUnwindDest;

  // Make a branch.
  BranchInst::Create(dest, src);

  // Update the phis in the destination. They were inserted in an
  // order which makes this work.
  addIncomingPHIValuesForInto(src, dest);

  if (hasSelector) {
    InnerExceptionPHI->addIncoming(call->getArgOperand(0), src);
    InnerSelectorPHI->addIncoming(call->getArgOperand(1), src);
  }

  return true;
}

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerUnwindDestNewEH() {
  // FIXME: New EH - rename this function when new EH is turned on.
  if (InnerResumeDest) return InnerResumeDest;

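  // A rough sketch of the transformation (the exact block names and phi
  // operands depend on the caller's IR): the landing pad is split right
  // after its landingpad instruction, e.g.
  //    lpad:
  //      %lp = landingpad ...
  //      insns
  // becomes
  //    lpad:
  //      %lp = landingpad ...
  //      br label %lpad.body
  //    lpad.body:
  //      %eh.lpad-body = phi ... [ %lp, %lpad ], ...
  //      insns (using %eh.lpad-body)
  // so that forwarded 'resume' instructions can branch to lpad.body and
  // feed their exception values into the phi.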
  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
  BasicBlock *Dest = getInnerUnwindDestNewEH();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// [LIBUNWIND] Check whether this selector is "only cleanups":
///   call i32 @llvm.eh.selector(blah, blah, i32 0)
static bool isCleanupOnlySelector(EHSelectorInst *selector) {
  if (selector->getNumArgOperands() != 3) return false;
  ConstantInt *val = dyn_cast<ConstantInt>(selector->getArgOperand(2));
  return (val && val->isZero());
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to the unwind destination held
/// in Invoke and fills in the PHI nodes in that block with the saved values.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LPI) // FIXME: New EH - This won't be NULL in the new EH.
      if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
        unsigned NumClauses = LPI->getNumClauses();
        L->reserveClauses(NumClauses);
        for (unsigned i = 0; i != NumClauses; ++i)
          L->addClause(LPI->getClause(i));
      }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // LIBUNWIND: merge selector instructions.
    if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
      EHSelectorInst *Outer = Invoke.getOuterSelector();
      if (!Outer) continue;

      bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
      bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);

      // If both selectors contain only cleanups, we don't need to do
      // anything. TODO: this is really just a very specific instance
      // of a much more general optimization.
      if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;

      // Otherwise, we just append the outer selector to the inner selector.
      SmallVector<Value*, 16> NewSelector;
      for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Inner->getArgOperand(i));
      for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Outer->getArgOperand(i));

      CallInst *NewInner =
        IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
      // No need to copy attributes, calling convention, etc.
      NewInner->takeName(Inner);
      Inner->replaceAllUsesWith(NewInner);
      Inner->eraseFromParent();
      continue;
    }

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
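    // Sketch of the rewrite (names here are illustrative, not taken from the
    // actual IR): a throwing call such as
    //   %x = call i32 @f(i32 %a)
    //   ...rest of block...
    // ends up as
    //   %x = invoke i32 @f(i32 %a)
    //          to label %bb.noexc unwind label %lpad
    // with the rest of the block moved into %bb.noexc, and %lpad being the
    // outer unwind destination of the invoke we inlined through.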
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
    // directly to the new landing pad.
    if (Invoke.forwardEHResume(CI, BB)) {
      // TODO: 'Split' is now unreachable; clean it up.

      // We want to leave the original call intact so that the call
      // graph and other structures won't get misled. We also have to
      // avoid processing the next block, or we'll iterate here forever.
      return true;
    }

    // Otherwise, create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split,
                         Invoke.getOuterUnwindDest(),
                         InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    Split->getInstList().pop_front(); // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}


/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite. If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail. The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction. Eliminate these entries (which might even delete the
    // PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  InvokeInliningInfo Invoke(II);

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
        // Honor a request to skip the next block. We don't need to
        // consider UnwindInsts in this case either.
        ++BB;
        continue;
      }

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site. Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      Invoke.addIncomingPHIValuesFor(BB);
    }

    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) {
      Invoke.forwardResume(RI);
    }
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

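  // Roughly, for a byval argument %arg of aggregate type %T, the code below
  // emits at the call site (names illustrative):
  //   %agg = alloca %T, align max(pref-align(%T), byval-align)  ; entry block
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.i8, i8* %arg.i8,
  //                                        i64 sizeof(%T), i32 1, i1 false)
  // and returns %agg so the inlined body uses the copy instead of %arg.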
  LLVMContext &Context = Arg->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca. If we have TargetData, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  // Emit a memcpy.
  Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy,
                                                 Tys);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
       ++UI) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
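//
// A marker may use the alloca directly or through a bitcast, e.g.
// (illustrative IR for a 4-byte alloca):
//   %a = alloca i32
//   %a.i8 = bitcast i32* %a to i8*
//   call void @llvm.lifetime.start(i64 4, i8* %a.i8)
// so we check the alloca itself when its type is already i8*, and otherwise
// scan its i8* pointer casts.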
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
       ++I) {
    if (I->getType() != Int8PtrTy) continue;
    if (I->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(*I))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update the InlinedAtEntry of a DebugLoc.
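///
/// For example, if an instruction in the callee already carries a location L1
/// whose inlined-at chain is L1 -> L2, and the call site being inlined through
/// has location C, the updated chain is L1 -> L2 -> C: the call-site location
/// is appended at the end of the existing chain.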
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}


/// fixupLineNumbers - Update inlined instructions' line numbers to encode the
/// location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller. This returns false if it is not possible to inline this
// call. The program is still in a well defined state if this occurs though.
//
// Note that this only does one level of inlining. For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream. Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  for (Function::const_iterator
         I = Caller->begin(), E = Caller->end(); I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      // FIXME: This 'isa' here should go away once the new EH system is
      // in place.
      if (!isa<LandingPadInst>(BB->getFirstNonPHI()))
        continue;
      const LandingPadInst *LP = cast<LandingPadInst>(BB->getFirstNonPHI());
      const Value *CallerPersFn = LP->getPersonalityFn();

      // If the personality functions match, then we can perform the
      // inlining. Otherwise, we can't inline.
      // TODO: This isn't 100% true. Some personality functions are proper
      //       supersets of others and can be used in place of the other.
      for (Function::const_iterator
             I = CalledFunc->begin(), E = CalledFunc->end(); I != E; ++I)
        if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
          const BasicBlock *BB = II->getUnwindDest();
          // FIXME: This 'if/dyn_cast' here should become a normal 'cast' once
          // the new EH system is in place.
          if (const LandingPadInst *LP =
              dyn_cast<LandingPadInst>(BB->getFirstNonPHI()))
            if (CallerPersFn != LP->getPersonalityFn())
              return false;
          break;
        }

      break;
    }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (!IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      builder.CreateLifetimeStart(AI);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
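  // That is, the inlined region roughly becomes (illustrative names):
  //   %savedstack = call i8* @llvm.stacksave()
  //   ... inlined body, including its dynamic allocas ...
  //   call void @llvm.stackrestore(i8* %savedstack)
  // so the callee's dynamic stack allocations are freed on every exit path.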
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind. These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          IRBuilder<>(UI).CreateCall(StackRestore, SavedPtr);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code. Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());
    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI)
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }

  return true;
}