//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
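//
// For example, given
//   { std::string s; f(); }
// the destructor call for 's' is a cleanup: it must run when control
// falls off the end of the block, when 'f' throws, and when a goto,
// break, or return transfers control out of the block.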
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"

using namespace clang;
using namespace CodeGen;

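/// Answers whether the given r-value needs to be spilled to a temporary
/// in order to be usable at a point that its definition might not
/// dominate; values which trivially dominate all uses (e.g. constants)
/// do not.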
bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}

/// Given a saved r-value produced by save(), emit the code necessary
/// to restore it to usability at the current insertion point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress: {
    llvm::Value *real =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 0));
    llvm::Value *imag =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
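///
/// The buffer is laid out with the data growing downward:
///
///   [StartOfBuffer ... <free> ... StartOfData ... <scopes> ... EndOfBuffer)
///
/// so a push decrements StartOfData and a pop increments it.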
char *EHScopeStack::allocate(size_t Size) {
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
         si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}

void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  StartOfData += Cleanup.getAllocatedSize();

  // Destroy the cleanup.
  Cleanup.~EHCleanupScope();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
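///
/// (A fixup is 'null' once its Destination has been resolved and
/// cleared; see ResolveBranchFixups.)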
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == 0)
    BranchFixups.pop_back();
}

void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == 0) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == 0) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination)) continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
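///
/// Roughly, an unconditional exit branch
///   br label %dest
/// becomes
///   %tmp = load i32* %cleanup.dest.slot
///   switch i32 %tmp, label %dest [ ... ]
/// with the original destination preserved as the switch's default.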
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = 0;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, LatestBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB))
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

/// Pops cleanup blocks until the given savepoint is reached, then pushes
/// the cleanups recorded past the given savepoint of the lifetime-extended
/// cleanups stack onto the EH stack.
void
CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
                                  size_t OldLifetimeExtendedSize) {
  PopCleanupBlocks(Old);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
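///
/// For example, when the entry's only predecessor ends in an
/// unconditional branch to it:
///   pred:      ...
///              br label %cleanup
///   cleanup:   <cleanup code>
/// the branch is erased and the cleanup code is spliced into 'pred'.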
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // EH cleanups always occur within a terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = 0;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway; if one exists, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = i.getUse();
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != 0));
  bool RequiresEHCleanup = (EHEntry != 0);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0 && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = 0;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = 0;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, CurEHLocation);

    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}

/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
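///
/// For example, a 'break' out of a block which constructs a local with
/// a non-trivial destructor is emitted through this method, so that
/// the branch is threaded through the destructor cleanup before it
/// reaches its real destination.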
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = 0;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //   - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //   - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}