//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop. This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code (see the sketch below).
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function. It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
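// As a concrete illustration of extension 2, a naive recursive factorial such
// as
//
//   int fact(int n) { return n <= 1 ? 1 : n * fact(n - 1); }
//
// is conceptually rewritten into a loop with an accumulator:
//
//   int fact(int n) {
//     int acc = 1;                     // seeded from the non-recursive return
//     while (n > 1) { acc *= n; --n; }
//     return acc;
//   }
//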
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion. Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction. It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring. For example, there could be GEP's and
//     stores to memory that will not be read or written by the call. This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

#define DEBUG_TYPE "tailcallelim"

STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped,   "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");

/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
  // Because of PR962, we don't TRE dynamic allocas.
  return llvm::all_of(instructions(F), [](Instruction &I) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    return !AI || AI->isStaticAlloca();
  });
}

namespace {
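/// Track which calls use a value derived from a local alloca or byval argument
/// (collected in AllocaUsers), and where such a value may escape the function
/// (collected in EscapePoints).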
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use the
  // value or a derived value in AllocaUsers, and places where it may escape in
  // EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;
    SmallPtrSet<Use *, 32> Visited;

    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(I);
        bool IsNocapture =
            CS.isDataOperand(U) && CS.doesNotCapture(CS.getDataOperandNo(U));
        callUsesLocalStack(CS, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
          continue;
        }
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        break;
      default:
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

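  // Record that the call CS uses an alloca-derived value. Unless the value is
  // passed as a nocapture argument or the call only reads memory, also record
  // the call as a potential escape point for the alloca.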
  void callUsesLocalStack(CallSite CS, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(CS.getInstruction());

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CS.onlyReadsMemory())
      EscapePoints.insert(CS.getInstruction());
  }

  SmallPtrSet<Instruction *, 32> AllocaUsers;
  SmallPtrSet<Instruction *, 32> EscapePoints;
};
}

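/// Mark calls in F with the 'tail' attribute when it is safe to do so, i.e.
/// when the callee cannot observe the caller's local stack (allocas and byval
/// arguments). Sets AllCallsAreTailCalls to false if any call could not be
/// marked. Returns true if any call was changed.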
static bool markTails(Function &F, bool &AllCallsAreTailCalls,
                      OptimizationRemarkEmitter *ORE) {
  if (F.callsFunctionThatReturnsTwice())
    return false;
  AllCallsAreTailCalls = true;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks that
  // contain the escaping instruction will be marked as being visited without an
  // escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this, we
  // maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an alloca
  // through other means if the block turns out to be reachable after an escape
  // point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I))
        continue;

      bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things computed
        // outside this function can be marked tail. Even if you stored the
        // alloca address into a global, a readnone function can't load the
        // global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->arg_operands()) {
          if (isa<Constant>(Arg.getUser()))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          using namespace ore;
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
        DeferredTails.push_back(CI);
      } else {
        AllCallsAreTailCalls = false;
      }
    }

    for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    } else {
      AllCallsAreTailCalls = false;
    }
  }

  return Modified;
}

/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects())  // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getModule()->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(),
                                       L->getAlignment(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  return !is_contained(I->operands(), CI);
}

/// Return true if the specified value is the same when the return would exit
/// as it was when the initial iteration of the recursive function was executed.
///
/// We currently handle static constants and arguments that are not modified as
/// part of the recursion.
static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
  if (isa<Constant>(V)) return true; // Static constants are always dyn consts

  // Check to see if this is an immutable argument, if so, the value
  // will be available to initialize the accumulator.
  if (Argument *Arg = dyn_cast<Argument>(V)) {
    // Figure out which argument number this is...
    unsigned ArgNo = 0;
    Function *F = CI->getParent()->getParent();
    for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI)
      ++ArgNo;

    // If we are passing this argument into call as the corresponding
    // argument operand, then the argument is dynamically constant.
    // Otherwise, we cannot transform this function safely.
    if (CI->getArgOperand(ArgNo) == Arg)
      return true;
  }

  // Switch cases are always constant integers. If the value is being switched
  // on and the return is only reachable from one of its cases, it's
  // effectively constant.
  if (BasicBlock *UniquePred = RI->getParent()->getUniquePredecessor())
    if (SwitchInst *SI = dyn_cast<SwitchInst>(UniquePred->getTerminator()))
      if (SI->getCondition() == V)
        return SI->getDefaultDest() != RI->getParent();

  // Not a constant or immutable argument, we can't safely transform.
  return false;
}

/// Check to see if the function containing the specified tail call consistently
/// returns the same runtime-constant value at all exit points except for
/// IgnoreRI. If so, return the returned value.
static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
  Function *F = CI->getParent()->getParent();
  Value *ReturnedValue = nullptr;

  for (BasicBlock &BBI : *F) {
    ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator());
    if (RI == nullptr || RI == IgnoreRI) continue;

    // We can only perform this transformation if the value returned is
    // evaluatable at the start of the initial invocation of the function,
    // instead of at the end of the evaluation.
    //
    Value *RetOp = RI->getOperand(0);
    if (!isDynamicConstant(RetOp, CI, RI))
      return nullptr;

    if (ReturnedValue && RetOp != ReturnedValue)
      return nullptr; // Cannot transform if differing values are returned.
    ReturnedValue = RetOp;
  }
  return ReturnedValue;
}

/// If the specified instruction can be transformed using accumulator recursion
/// elimination, return the constant which is the start of the accumulator
/// value. Otherwise return null.
static Value *canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
  if (!I->isAssociative() || !I->isCommutative()) return nullptr;
  assert(I->getNumOperands() == 2 &&
         "Associative/commutative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return nullptr;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return nullptr;

  // Ok, now we have to check all of the other return instructions in this
  // function. If they return non-constants or differing values, then we cannot
  // transform the function safely.
  return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI);
}

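/// Skip over any debug intrinsics starting at I and return the first
/// non-debug instruction.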
static Instruction *firstNonDbg(BasicBlock::iterator I) {
  while (isa<DbgInfoIntrinsic>(I))
    ++I;
  return &*I;
}

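/// Scan backwards from the terminator TI looking for a call to the enclosing
/// function that is a candidate for tail recursion elimination. Returns null
/// if no candidate is found, if the call is marked 'tail' but such calls
/// cannot be eliminated here, or if the function is a trivial self-forwarding
/// wrapper (e.g. fabs) that the code generator will lower to inline code.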
static CallInst *findTRECandidate(Instruction *TI,
                                  bool CannotTailCallElimCallsMarkedTail,
                                  const TargetTransformInfo *TTI) {
  BasicBlock *BB = TI->getParent();
  Function *F = BB->getParent();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  // If this call is marked as a tail call, and if there are dynamic allocas in
  // the function, we cannot perform this optimization.
  if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  if (BB == &F->getEntryBlock() &&
      firstNonDbg(BB->front().getIterator()) == CI &&
      firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
      !TTI->isLoweredToCall(CI->getCalledFunction())) {
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    CallSite::arg_iterator I = CallSite(CI).arg_begin(),
                           E = CallSite(CI).arg_end();
    Function::arg_iterator FI = F->arg_begin(),
                           FE = F->arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}

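/// Attempt to turn the recursive call CI, which is immediately followed (up to
/// movable instructions) by the return Ret, into a branch back to the top of
/// the function. On the first successful transformation a new entry block is
/// created and PHI nodes for the arguments are collected in ArgumentPHIs; an
/// accumulator PHI is introduced when an associative/commutative operation or
/// a differing constant return would otherwise block the transformation.
/// Returns true if the call was eliminated.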
static bool eliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
                                       BasicBlock *&OldEntry,
                                       bool &TailCallsAreMarkedTail,
                                       SmallVectorImpl<PHINode *> &ArgumentPHIs,
                                       AliasAnalysis *AA,
                                       OptimizationRemarkEmitter *ORE) {
  // If we are introducing accumulator recursion to eliminate operations after
  // the call instruction that are both associative and commutative, the initial
  // value for the accumulator is placed in this variable. If this value is set
  // then we actually perform accumulator recursion elimination instead of
  // simple tail recursion elimination. If the operation is an LLVM instruction
  // (eg: "add") then it is recorded in AccumulatorRecursionInstr. If not, then
  // we are handling the case when the return instruction returns a constant C
  // which is different to the constant returned by other return instructions
  // (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
  // special case of accumulator recursion, the operation being "return C".
  Value *AccumulatorRecursionEliminationInitVal = nullptr;
  Instruction *AccumulatorRecursionInstr = nullptr;

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember the initial accumulator value for later.
    if ((AccumulatorRecursionEliminationInitVal =
             canTransformAccumulatorRecursion(&*BBI, CI))) {
      // Yes, this is accumulator recursion. Remember which instruction
      // accumulates.
      AccumulatorRecursionInstr = &*BBI;
    } else {
      return false; // Otherwise, we cannot eliminate the tail recursion!
    }
  }

  // We can only transform call/return pairs that either ignore the return value
  // of the call and return void, ignore the value of the call and return a
  // constant, return the value returned by the tail call, or that are being
  // accumulator recursion variable eliminated.
  if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
      !isa<UndefValue>(Ret->getReturnValue()) &&
      AccumulatorRecursionEliminationInitVal == nullptr &&
      !getCommonReturnValue(nullptr, CI)) {
    // One case remains that we are able to handle: the current return
    // instruction returns a constant, and all other return instructions
    // return a different constant.
    if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
      return false; // Current return instruction does not return a constant.
    // Check that all other return instructions return a common constant. If
    // so, record it in AccumulatorRecursionEliminationInitVal.
    AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
    if (!AccumulatorRecursionEliminationInitVal)
      return false;
  }

  BasicBlock *BB = Ret->getParent();
  Function *F = BB->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!OldEntry) {
    OldEntry = &F->getEntryBlock();
    BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
    NewEntry->takeName(OldEntry);
    OldEntry->setName("tailrecurse");
    BranchInst::Create(OldEntry, NewEntry);

    // If this tail call is marked 'tail' and if there are any allocas in the
    // entry block, move them up to the new entry block.
    TailCallsAreMarkedTail = CI->isTailCall();
    if (TailCallsAreMarkedTail)
      // Move all fixed sized allocas from OldEntry to NewEntry.
      for (BasicBlock::iterator OEBI = OldEntry->begin(), E = OldEntry->end(),
                                NEBI = NewEntry->begin(); OEBI != E; )
        if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
          if (isa<ConstantInt>(AI->getArraySize()))
            AI->moveBefore(&*NEBI);

    // Now that we have created a new block, which jumps to the entry
    // block, insert a PHI node for each argument of the function.
    // For now, we initialize each PHI to only have the real arguments
    // which are passed in.
    Instruction *InsertPos = &OldEntry->front();
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I) {
      PHINode *PN = PHINode::Create(I->getType(), 2,
                                    I->getName() + ".tr", InsertPos);
      I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
      PN->addIncoming(&*I, NewEntry);
      ArgumentPHIs.push_back(PN);
    }
  }

  // If this function has self recursive calls in the tail position where some
  // are marked tail and some are not, only transform one flavor or another. We
  // have to choose whether we move allocas in the entry block to the new entry
  // block or not, so we can't make a good choice for both. NOTE: We could do
  // slightly better here in the case that the function has no entry block
  // allocas.
  if (TailCallsAreMarkedTail && !CI->isTailCall())
    return false;

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
    ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);

  // If we are introducing an accumulator variable to eliminate the recursion,
  // do so now. Note that we _know_ that no subsequent tail recursion
  // eliminations will happen on this function because of the way the
  // accumulator recursion predicate is set up.
  //
  if (AccumulatorRecursionEliminationInitVal) {
    Instruction *AccRecInstr = AccumulatorRecursionInstr;
    // Start by inserting a new PHI node for the accumulator.
    pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry);
    PHINode *AccPN = PHINode::Create(
        AccumulatorRecursionEliminationInitVal->getType(),
        std::distance(PB, PE) + 1, "accumulator.tr", &OldEntry->front());

    // Loop over all of the predecessors of the tail recursion block. For the
    // real entry into the function we seed the PHI with the initial value,
    // computed earlier. For any other existing branches to this block (due to
    // other tail recursions eliminated) the accumulator is not modified.
    // Because we haven't added the branch in the current block to OldEntry yet,
    // it will not show up as a predecessor.
    for (pred_iterator PI = PB; PI != PE; ++PI) {
      BasicBlock *P = *PI;
      if (P == &F->getEntryBlock())
        AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
      else
        AccPN->addIncoming(AccPN, P);
    }

    if (AccRecInstr) {
      // Add an incoming argument for the current block, which is computed by
      // our associative and commutative accumulator instruction.
      AccPN->addIncoming(AccRecInstr, BB);

      // Next, rewrite the accumulator recursion instruction so that it does not
      // use the result of the call anymore, instead, use the PHI node we just
      // inserted.
      AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
    } else {
      // Add an incoming argument for the current block, which is just the
      // constant returned by the current return instruction.
      AccPN->addIncoming(Ret->getReturnValue(), BB);
    }

    // Finally, rewrite any return instructions in the program to return the PHI
    // node instead of the "initval" that they do currently. This loop will
    // actually rewrite the return value we are destroying, but that's ok.
    for (BasicBlock &BBI : *F)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator()))
        RI->setOperand(0, AccPN);
    ++NumAccumAdded;
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(OldEntry, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  BB->getInstList().erase(Ret);  // Remove return.
  BB->getInstList().erase(CI);   // Remove call.
  ++NumEliminated;
  return true;
}

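/// BB is a block ending in a trivial return (only PHIs and debug intrinsics
/// precede Ret). Duplicate the return into predecessors that end in an
/// unconditional branch, then try tail recursion elimination on each
/// duplicated return. Returns true if anything changed.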
static bool foldReturnAndProcessPred(
    BasicBlock *BB, ReturnInst *Ret, BasicBlock *&OldEntry,
    bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs,
    bool CannotTailCallElimCallsMarkedTail, const TargetTransformInfo *TTI,
    AliasAnalysis *AA, OptimizationRemarkEmitter *ORE) {
  bool Change = false;

  // Make sure this block is a trivial return block.
  assert(BB->getFirstNonPHIOrDbg() == Ret &&
         "Trying to fold non-trivial return block");

  // If the return block contains nothing but the return and PHI's,
  // there might be an opportunity to duplicate the return in its
  // predecessors and perform TRE there. Look for predecessors that end
  // in unconditional branch and recursive call(s).
  SmallVector<BranchInst *, 8> UncondBranchPreds;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    TerminatorInst *PTI = Pred->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(PTI))
      if (BI->isUnconditional())
        UncondBranchPreds.push_back(BI);
  }

  while (!UncondBranchPreds.empty()) {
    BranchInst *BI = UncondBranchPreds.pop_back_val();
    BasicBlock *Pred = BI->getParent();
    if (CallInst *CI =
            findTRECandidate(BI, CannotTailCallElimCallsMarkedTail, TTI)) {
      LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
                        << "INTO UNCOND BRANCH PRED: " << *Pred);
      ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred);

      // Cleanup: if all predecessors of BB have been eliminated by
      // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
      // because the ret instruction in there is still using a value which
      // eliminateRecursiveTailCall will attempt to remove.
      if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
        BB->eraseFromParent();

      eliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
                                 ArgumentPHIs, AA, ORE);
      ++NumRetDuped;
      Change = true;
    }
  }

  return Change;
}

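/// Try to eliminate a tail-recursive call that immediately precedes the given
/// return instruction. Returns true if the block was changed.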
static bool processReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
                                  bool &TailCallsAreMarkedTail,
                                  SmallVectorImpl<PHINode *> &ArgumentPHIs,
                                  bool CannotTailCallElimCallsMarkedTail,
                                  const TargetTransformInfo *TTI,
                                  AliasAnalysis *AA,
                                  OptimizationRemarkEmitter *ORE) {
  CallInst *CI = findTRECandidate(Ret, CannotTailCallElimCallsMarkedTail, TTI);
  if (!CI)
    return false;

  return eliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
                                    ArgumentPHIs, AA, ORE);
}

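/// Driver for the transformation: mark safe calls as 'tail', then rewrite
/// tail-recursive calls in F into branches back to the entry block, and
/// finally simplify any argument PHI nodes that turned out to be trivial.
/// Returns true if the function was modified.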
static bool eliminateTailRecursion(Function &F, const TargetTransformInfo *TTI,
                                   AliasAnalysis *AA,
                                   OptimizationRemarkEmitter *ORE) {
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  bool MadeChange = false;
  bool AllCallsAreTailCalls = false;
  MadeChange |= markTails(F, AllCallsAreTailCalls, ORE);
  if (!AllCallsAreTailCalls)
    return MadeChange;

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return false;

  BasicBlock *OldEntry = nullptr;
  bool TailCallsAreMarkedTail = false;
  SmallVector<PHINode *, 8> ArgumentPHIs;

  // If false, we cannot perform TRE on tail calls marked with the 'tail'
  // attribute, because doing so would cause the stack size to increase (real
  // TRE would deallocate variable sized allocas, TRE doesn't).
  bool CanTRETailMarkedCall = canTRE(F);

  // Change any tail recursive calls to loops.
  //
  // FIXME: The code generator produces really bad code when an 'escaping
  // alloca' is changed from being a static alloca to being a dynamic alloca.
  // Until this is resolved, disable this transformation if that would ever
  // happen. This bug is PR962.
  for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
    BasicBlock *BB = &*BBI++; // foldReturnAndProcessPred may delete BB.
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
      bool Change = processReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
                                          ArgumentPHIs, !CanTRETailMarkedCall,
                                          TTI, AA, ORE);
      if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
        Change = foldReturnAndProcessPred(BB, Ret, OldEntry,
                                          TailCallsAreMarkedTail, ArgumentPHIs,
                                          !CanTRETailMarkedCall, TTI, AA, ORE);
      MadeChange |= Change;
    }
  }

  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  return MadeChange;
}

namespace {
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    initializeTailCallElimPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    return eliminateTailRecursion(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
  }
};
}

char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
                    false, false)

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}

PreservedAnalyses TailCallElimPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {

  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = eliminateTailRecursion(F, &TTI, &AA, &ORE);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}