//===- LowerInvoke.cpp - Eliminate Invoke & Unwind instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding.  This pass supports two models of exception
// handling lowering: 'cheap' support and 'expensive' support.
//
// 'Cheap' exception handling support gives the program the ability to execute
// any code that does not dynamically "throw an exception", by turning 'invoke'
// instructions into calls and by turning 'unwind' instructions into calls to
// abort().  If the program does dynamically use the unwind instruction, it
// will print a message and then abort.
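//
// For example, under the 'cheap' model an invoke such as:
//
//   invoke void @foo() to label %normal unwind label %lpad
//
// is rewritten (roughly) as:
//
//   call void @foo()
//   br label %normal
//
// and %lpad loses this block as a predecessor; see insertCheapEHSupport below
// for the exact transformation.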
//
// 'Expensive' exception handling support gives full exception handling support
// to the program at the cost of making the 'invoke' instruction really
// expensive.  It inserts setjmp/longjmp calls to emulate the exception
// handling as necessary.
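//
// Schematically, the 'expensive' model rewrites the entry of each function
// containing invokes along these lines (see insertExpensiveEHSupport):
//
//   entry:
//     ; push a jmp_buf onto the global llvm.sjljeh.jblist, then...
//     %sjret = call i32 @llvm.setjmp(i8* %buf)
//     %notunwind = icmp eq i32 %sjret, 0
//     br i1 %notunwind, label %setjmp.cont, label %setjmp.catch
//   setjmp.catch:
//     ; switch on the number of the currently-executing invoke to reach the
//     ; right landing pad, or rethrow via longjmp if none is active.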
//
// Because the 'expensive' support slows programs down considerably, and only a
// subset of programs use EH, it must be explicitly enabled by an option.
//
// Note that after this pass runs the CFG is not entirely accurate (exceptional
// control flow edges are not correct anymore) so only very simple things
// should be done after the lowerinvoke pass has run (like generation of native
// code).  This should not be used as a general purpose "my LLVM-to-LLVM pass
// doesn't support the invoke instruction yet" lowering pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "lowerinvoke"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <csetjmp>
#include <set>
using namespace llvm;

STATISTIC(NumInvokes, "Number of invokes replaced");
STATISTIC(NumSpilled, "Number of registers live across unwind edges");

static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
 cl::desc("Make the -lowerinvoke pass insert expensive, but correct, EH code"));

namespace {
  class LowerInvoke : public FunctionPass {
    const TargetMachine *TM;

    // Used for both models.
    Constant *AbortFn;

    // Used for expensive EH support.
    StructType *JBLinkTy;
    GlobalVariable *JBListHead;
    Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
    bool useExpensiveEHSupport;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit LowerInvoke(const TargetMachine *TM = 0,
                         bool useExpensiveEHSupport = ExpensiveEHSupport)
      : FunctionPass(ID), TM(TM),
        useExpensiveEHSupport(useExpensiveEHSupport) {
      initializeLowerInvokePass(*PassRegistry::getPassRegistry());
    }
    bool doInitialization(Module &M);
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // This is a cluster of orthogonal Transforms
      AU.addPreserved("mem2reg");
      AU.addPreservedID(LowerSwitchID);
    }

  private:
    bool insertCheapEHSupport(Function &F);
    void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
    void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                AllocaInst *InvokeNum, AllocaInst *StackPtr,
                                SwitchInst *CatchSwitch);
    bool insertExpensiveEHSupport(Function &F);
  };
}

char LowerInvoke::ID = 0;
INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
                "Lower invoke and unwind, for unwindless code generators",
                false, false)

char &llvm::LowerInvokePassID = LowerInvoke::ID;

// Public Interface to the LowerInvoke pass.
FunctionPass *llvm::createLowerInvokePass(const TargetMachine *TM,
                                          bool useExpensiveEHSupport) {
  return new LowerInvoke(TM, useExpensiveEHSupport || ExpensiveEHSupport);
}

// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
  Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
  if (useExpensiveEHSupport) {
    // Insert a type for the linked list of jump buffers.
    const TargetLowering *TLI = TM ? TM->getTargetLowering() : 0;
    unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
    JBSize = JBSize ? JBSize : 200;
    Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);

    JBLinkTy = StructType::create(M.getContext(), "llvm.sjljeh.jmpbufty");
    Type *Elts[] = { JmpBufTy, PointerType::getUnqual(JBLinkTy) };
    JBLinkTy->setBody(Elts);
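    // The resulting linked-list node type is, schematically:
    //   %llvm.sjljeh.jmpbufty = type { [JBSize x i8*], %llvm.sjljeh.jmpbufty* }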

    Type *PtrJBList = PointerType::getUnqual(JBLinkTy);

    // Now that we've done that, insert the jmpbuf list head global, unless it
    // already exists.
    if (!(JBListHead = M.getGlobalVariable("llvm.sjljeh.jblist", PtrJBList))) {
      JBListHead = new GlobalVariable(M, PtrJBList, false,
                                      GlobalValue::LinkOnceAnyLinkage,
                                      Constant::getNullValue(PtrJBList),
                                      "llvm.sjljeh.jblist");
    }

    // Visual Studio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

    SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
    // Restore the setjmp macro to its Visual Studio _setjmp state.
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

    LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
    StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
    StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
  }

  // We need the 'abort' function for both models.
  AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
                                  (Type *)0);
  return true;
}

bool LowerInvoke::insertCheapEHSupport(Function &F) {
  bool Changed = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
      // Insert a normal call instruction...
      CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                           CallArgs, "", II);
      NewCall->takeName(II);
      NewCall->setCallingConv(II->getCallingConv());
      NewCall->setAttributes(II->getAttributes());
      NewCall->setDebugLoc(II->getDebugLoc());
      II->replaceAllUsesWith(NewCall);

      // Insert an unconditional branch to the normal destination.
      BranchInst::Create(II->getNormalDest(), II);

      // Remove any PHI node entries from the exception destination.
      II->getUnwindDest()->removePredecessor(BB);

      // Remove the invoke instruction now.
      BB->getInstList().erase(II);

      ++NumInvokes; Changed = true;
    }
  return Changed;
}

/// rewriteExpensiveInvoke - Insert code and hack the function to replace the
/// specified invoke instruction with a call.
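///
/// Schematically, for invoke number N the rewritten code looks like:
///
///   store volatile i32 N, i32* %invokenum
///   %ssret = call i8* @llvm.stacksave()
///   store volatile i8* %ssret, i8** %stackptr
///   call void @foo()                  ; the devirtualized invoke
///   store i32 0, i32* %invokenum
///   br label %normal
///
/// and the catch switch gains a case sending N to the unwind destination,
/// which now begins by restoring the saved stack pointer.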
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                         AllocaInst *InvokeNum,
                                         AllocaInst *StackPtr,
                                         SwitchInst *CatchSwitch) {
  ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                            InvokeNo);

  // If the unwind edge has phi nodes, split the edge.
  if (isa<PHINode>(II->getUnwindDest()->begin())) {
    SplitCriticalEdge(II, 1, this);

    // If there are any phi nodes left, they must have a single predecessor.
    while (PHINode *PN = dyn_cast<PHINode>(II->getUnwindDest()->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }
  }

  // Insert a store of the invoke num before the invoke and store zero into the
  // location afterward.
  new StoreInst(InvokeNoC, InvokeNum, true, II);  // volatile

  // Insert a store of the stack ptr before the invoke, so we can restore it
  // later in the exception case.
  CallInst *StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
  new StoreInst(StackSaveRet, StackPtr, true, II);  // volatile

  BasicBlock::iterator NI = II->getNormalDest()->getFirstInsertionPt();
  // nonvolatile.
  new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
                InvokeNum, false, NI);

  Instruction *StackPtrLoad =
    new LoadInst(StackPtr, "stackptr.restore", true,
                 II->getUnwindDest()->getFirstInsertionPt());
  CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);

  // Add a switch case to our unwind block.
  CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());

  // Insert a normal call instruction.
  SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(),
                                       CallArgs, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Replace the invoke with an uncond branch.
  BranchInst::Create(II->getNormalDest(), NewCall->getParent());
  II->eraseFromParent();
}

/// MarkBlocksLiveIn - Insert BB and all of its predecessors into LiveBBs until
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
  if (!LiveBBs.insert(BB).second) return; // already been here.

  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    MarkBlocksLiveIn(*PI, LiveBBs);
}

// First thing we need to do is scan the whole function for values that are
// live across unwind edges.  Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge.  This process also splits all critical edges
// coming out of invokes.
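//
// A value that fails this test is demoted with DemoteRegToStack, which
// (roughly) replaces
//
//   %v = ...
//   ... uses of %v reachable across the unwind edge ...
//
// with an entry-block alloca, a store to it right after %v's definition, and
// a reload immediately before each use.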
void LowerInvoke::
splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
  // First step, split all critical edges from invoke instructions.
  for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
    InvokeInst *II = Invokes[i];
    SplitCriticalEdge(II, 0, this);
    SplitCriticalEdge(II, 1, this);
    assert(!isa<PHINode>(II->getNormalDest()->begin()) &&
           !isa<PHINode>(II->getUnwindDest()->begin()) &&
           "critical edge splitting left single entry phi nodes?");
  }

  Function *F = Invokes.back()->getParent()->getParent();

  // To avoid having to handle incoming arguments specially, we lower each arg
  // to a copy instruction in the entry block.  This ensures that the argument
  // value itself cannot be live across the entry block.
  BasicBlock::iterator AfterAllocaInsertPt = F->begin()->begin();
  while (isa<AllocaInst>(AfterAllocaInsertPt) &&
         isa<ConstantInt>(cast<AllocaInst>(AfterAllocaInsertPt)->getArraySize()))
    ++AfterAllocaInsertPt;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
       AI != E; ++AI) {
    Type *Ty = AI->getType();
    // Aggregate types can't be cast, but are legal argument types, so we have
    // to handle them differently.  We use an extract/insert pair as a
    // lightweight method to achieve the same goal.
    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
      Instruction *NI = InsertValueInst::Create(AI, EI, 0);
      NI->insertAfter(EI);
      AI->replaceAllUsesWith(NI);
      // Set the operand of the instructions back to the argument.
      EI->setOperand(0, AI);
      NI->setOperand(0, AI);
    } else {
      // This is always a no-op cast because we're casting AI to AI->getType()
      // so src and destination types are identical.  BitCast is the only
      // possibility.
      CastInst *NC = new BitCastInst(
        AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
      AI->replaceAllUsesWith(NC);
      // Set the operand of the cast instruction back to the argument.
      // Normally it's forbidden to replace a CastInst's operand because it
      // could cause the opcode to reflect an illegal conversion.  However,
      // we're replacing it here with the same value it was constructed with.
      // We do this because the above replaceAllUsesWith() clobbered the
      // operand, but we want this one to remain.
      NC->setOperand(0, AI);
    }
  }

  // Finally, scan the code looking for instructions with bad live ranges.
  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
      // Ignore obvious cases we don't have to handle.  In particular, most
      // instructions either have no uses or only have a single use inside the
      // current block.  Ignore them quickly.
      Instruction *Inst = II;
      if (Inst->use_empty()) continue;
      if (Inst->hasOneUse() &&
          cast<Instruction>(Inst->use_back())->getParent() == BB &&
          !isa<PHINode>(Inst->use_back())) continue;

      // If this is an alloca in the entry block, it's not a real register
      // value.
      if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
        if (isa<ConstantInt>(AI->getArraySize()) && BB == F->begin())
          continue;

      // Avoid iterator invalidation by copying users to a temporary vector.
      SmallVector<Instruction*,16> Users;
      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
           UI != E; ++UI) {
        Instruction *User = cast<Instruction>(*UI);
        if (User->getParent() != BB || isa<PHINode>(User))
          Users.push_back(User);
      }

      // Scan all of the uses and see if the live range is live across an
      // unwind edge.  If we find a use live across an invoke edge, create an
      // alloca and spill the value.

      // Find all of the blocks that this value is live in.
      std::set<BasicBlock*> LiveBBs;
      LiveBBs.insert(Inst->getParent());
      while (!Users.empty()) {
        Instruction *U = Users.back();
        Users.pop_back();

        if (!isa<PHINode>(U)) {
          MarkBlocksLiveIn(U->getParent(), LiveBBs);
        } else {
          // Uses for a PHI node occur in their predecessor block.
          PHINode *PN = cast<PHINode>(U);
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
            if (PN->getIncomingValue(i) == Inst)
              MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
        }
      }

      // Now that we know all of the blocks that this thing is live in, see if
      // it includes any of the unwind locations.
      bool NeedsSpill = false;
      for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
        BasicBlock *UnwindBlock = Invokes[i]->getUnwindDest();
        if (UnwindBlock != BB && LiveBBs.count(UnwindBlock)) {
          NeedsSpill = true;
        }
      }

      // If we decided we need a spill, do it.
      if (NeedsSpill) {
        ++NumSpilled;
        DemoteRegToStack(*Inst, true);
      }
    }
}

bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
  SmallVector<ReturnInst*,16> Returns;
  SmallVector<InvokeInst*,16> Invokes;
  UnreachableInst *UnreachablePlaceholder = 0;

  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
      // Remember all return instructions in case we insert an invoke into this
      // function.
      Returns.push_back(RI);
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Invokes.push_back(II);
    }

  if (Invokes.empty()) return false;

  NumInvokes += Invokes.size();

  // TODO: This is not an optimal way to do this.  In particular, this always
  // inserts setjmp calls into the entries of functions with invoke
  // instructions even though there are possibly paths through the function
  // that do not execute any invokes.  In particular, for functions with early
  // exits, e.g. the 'addMove' method in hexxagon, it would be nice to not
  // have to do the setjmp stuff on the early exit path.  This requires a bit
  // of dataflow, but would not be too hard to do.

  // If we have an invoke instruction, insert a setjmp that dominates all
  // invokes.  After the setjmp, use a cond branch that goes to the original
  // code path on zero, and to a designated 'catch' block on nonzero.
  Value *OldJmpBufPtr = 0;
  if (!Invokes.empty()) {
    // First thing we need to do is scan the whole function for values that
    // are live across unwind edges.  Each value that is live across an unwind
    // edge we spill into a stack location, guaranteeing that there is nothing
    // live across the unwind edge.  This process also splits all critical
    // edges coming out of invokes.
    splitLiveRangesLiveAcrossInvokes(Invokes);

    BasicBlock *EntryBB = F.begin();

    // Create an alloca for the incoming jump buffer ptr and the new jump
    // buffer that needs to be restored on all exits from the function.  This
    // is an alloca because the value needs to be live across invokes.
    const TargetLowering *TLI = TM ? TM->getTargetLowering() : 0;
    unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
    AllocaInst *JmpBuf =
      new AllocaInst(JBLinkTy, 0, Align,
                     "jblink", F.begin()->begin());

    Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                     ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
    OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "OldBuf",
                                             EntryBB->getTerminator());

    // Copy the JBListHead to the alloca.
    Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
                                 EntryBB->getTerminator());
    new StoreInst(OldBuf, OldJmpBufPtr, true, EntryBB->getTerminator());

    // Add the new jumpbuf to the list.
    new StoreInst(JmpBuf, JBListHead, true, EntryBB->getTerminator());

    // Create the catch block.  The catch block is basically a big switch
    // statement that goes to all of the invoke catch blocks.
    BasicBlock *CatchBB =
      BasicBlock::Create(F.getContext(), "setjmp.catch", &F);

    // Create an alloca which keeps track of the stack pointer before every
    // invoke; this allows us to properly restore the stack pointer after
    // long jumping.
    AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
                                          "stackptr", EntryBB->begin());

    // Create an alloca which keeps track of which invoke is currently
    // executing.  For normal calls it contains zero.
    AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
                                           "invokenum", EntryBB->begin());
    new StoreInst(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0),
                  InvokeNum, true, EntryBB->getTerminator());

    // Insert a load in the Catch block, and a switch on its value.  By
    // default, we go to a block that just does an unwind (which is the
    // correct action for a standard call).  We insert an unreachable
    // instruction here and modify the block to jump to the correct unwinding
    // pad later.
    BasicBlock *UnwindBB = BasicBlock::Create(F.getContext(), "unwindbb", &F);
    UnreachablePlaceholder = new UnreachableInst(F.getContext(), UnwindBB);

    Value *CatchLoad = new LoadInst(InvokeNum, "invoke.num", true, CatchBB);
    SwitchInst *CatchSwitch =
      SwitchInst::Create(CatchLoad, UnwindBB, Invokes.size(), CatchBB);

    // Now that things are set up, insert the setjmp call itself.

    // Split the entry block to insert the conditional branch for the setjmp.
    BasicBlock *ContBlock = EntryBB->splitBasicBlock(EntryBB->getTerminator(),
                                                     "setjmp.cont");

    Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
    Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx, "TheJmpBuf",
                                                 EntryBB->getTerminator());
    JmpBufPtr = new BitCastInst(JmpBufPtr,
                                Type::getInt8PtrTy(F.getContext()),
                                "tmp", EntryBB->getTerminator());
    Value *SJRet = CallInst::Create(SetJmpFn, JmpBufPtr, "sjret",
                                    EntryBB->getTerminator());

    // Compare the return value to zero.
    Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
                                   ICmpInst::ICMP_EQ, SJRet,
                                   Constant::getNullValue(SJRet->getType()),
                                   "notunwind");
    // Nuke the uncond branch.
    EntryBB->getTerminator()->eraseFromParent();

    // Put in a new condbranch in its place.
    BranchInst::Create(ContBlock, CatchBB, IsNormal, EntryBB);

    // At this point, we are all set up, rewrite each invoke instruction.
    for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
      rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
  }

  // We know that there is at least one invoke, so the unwind path below is
  // reachable through the catch switch's default case.

  // Create three new blocks, the block to load the jmpbuf ptr and compare
  // against null, the block to do the longjmp, and the error block for if it
  // is null.  Add them at the end of the function because they are not hot.
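  //
  // Schematically (see the code below for the precise details):
  //
  //   dounwind:                        ; default case of the catch switch
  //     ; load the jmpbuf list head, restoring the caller's entry if this
  //     ; function pushed one
  //     br i1 (head != null), label %unwind, label %unwinderror
  //   unwind:
  //     call void @llvm.longjmp(i8* %buf, i32 1)
  //     unreachable
  //   unwinderror:                     ; throw without a catch
  //     call void @abort()
  //     unreachable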
  BasicBlock *UnwindHandler = BasicBlock::Create(F.getContext(),
                                                 "dounwind", &F);
  BasicBlock *UnwindBlock = BasicBlock::Create(F.getContext(), "unwind", &F);
  BasicBlock *TermBlock = BasicBlock::Create(F.getContext(), "unwinderror", &F);

  // If this function contains an invoke, restore the old jumpbuf ptr.
  Value *BufPtr;
  if (OldJmpBufPtr) {
    // Before the return, insert a copy from the saved value to the new value.
    BufPtr = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", UnwindHandler);
    new StoreInst(BufPtr, JBListHead, UnwindHandler);
  } else {
    BufPtr = new LoadInst(JBListHead, "ehlist", UnwindHandler);
  }

  // Load the JBList; if it's null, then there was no catch!
  Value *NotNull = new ICmpInst(*UnwindHandler, ICmpInst::ICMP_NE, BufPtr,
                                Constant::getNullValue(BufPtr->getType()),
                                "notnull");
  BranchInst::Create(UnwindBlock, TermBlock, NotNull, UnwindHandler);

  // Create the block to do the longjmp.
  // Get a pointer to the jmpbuf and longjmp.
  Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
                   ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
  Idx[0] = GetElementPtrInst::Create(BufPtr, Idx, "JmpBuf", UnwindBlock);
  Idx[0] = new BitCastInst(Idx[0],
                           Type::getInt8PtrTy(F.getContext()),
                           "tmp", UnwindBlock);
  Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
  CallInst::Create(LongJmpFn, Idx, "", UnwindBlock);
  new UnreachableInst(F.getContext(), UnwindBlock);

  // Set up the term block ("throw without a catch").
  new UnreachableInst(F.getContext(), TermBlock);

  // Insert a call to abort().
  CallInst::Create(AbortFn, "",
                   TermBlock->getTerminator())->setTailCall();

  // Replace the inserted unreachable with a branch to the unwind handler.
  if (UnreachablePlaceholder) {
    BranchInst::Create(UnwindHandler, UnreachablePlaceholder);
    UnreachablePlaceholder->eraseFromParent();
  }

  // Finally, for any returns from this function, if this function contains an
  // invoke, restore the old jmpbuf pointer to its input value.
  if (OldJmpBufPtr) {
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *R = Returns[i];

      // Before the return, insert a copy from the saved value to the new
      // value.
      Value *OldBuf = new LoadInst(OldJmpBufPtr, "oldjmpbufptr", true, R);
      new StoreInst(OldBuf, JBListHead, true, R);
    }
  }

  return true;
}

bool LowerInvoke::runOnFunction(Function &F) {
  if (useExpensiveEHSupport)
    return insertExpensiveEHSupport(F);
  else
    return insertCheapEHSupport(F);
}