//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc = S->getLocStart();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S) {
  assert(S && "Null statement?");

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
  case Stmt::OMPParallelDirectiveClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CapturedStmtClass:
    EmitCapturedStmt(cast<CapturedStmt>(*S), CR_Default);
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
    break;
  case Stmt::SEHTryStmtClass:
    // FIXME: Not yet implemented.
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
  case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
  case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
  case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
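///
/// For example, in the GNU statement-expression extension
///   int x = ({ f(); g(); });
/// the value of the last sub-statement, g(), becomes the value of x.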
llvm::Value *CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S,
                                               bool GetLast,
                                               AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

llvm::Value*
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {
  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end() - GetLast; I != E; ++I)
    EmitStmt(*I);

  llvm::Value *RetAlloca = 0;
  if (GetLast) {
    // We have to special case labels here.  They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr.  Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }
  }

  return RetAlloca;
}

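/// SimplifyForwardingBlocks - Fold away a block that does nothing but branch
/// unconditionally to its successor, e.g. a loop-condition block that was
/// reduced to a plain jump, by forwarding all of its uses to that successor.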
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB, BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::BasicBlock::use_iterator
         i = block->use_begin(), e = block->use_end(); i != e; ++i) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

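/// Emit an indirect goto, i.e. the GNU computed-goto extension:
///   void *dest = &&some_label;
///   goto *dest;
/// All indirect gotos in a function share a single dispatch block; its
/// leading PHI node collects the target address from every jump site.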
void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
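  // For example, "if (0)" or "if (sizeof(long) == 8)" folds to a constant,
  // so only the live arm needs to be emitted.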
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (!ContainsLabel(Skipped)) {
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    // There is no need to emit a line number for an unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBlock(ElseBlock);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    // There is no need to emit a line number for an unconditional branch.
    if (getDebugInfo())
      Builder.SetCurrentDebugLocation(llvm::DebugLoc());
    EmitBranch(ContBlock);
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");

    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader is typically
  // just an unconditional branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
  EmitBlock(LoopBody);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the "do.cond" block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch)
    Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the "do.cond" block is
  // typically just an unconditional branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  // Create a cleanup scope for the condition variable cleanups.
  RunCleanupsScope ConditionScope(*this);

  llvm::Value *BoolCondVal = 0;
  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  RunCleanupsScope ForScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first pieces before the loop.
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);

  // Create a block for the increment.  In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    EmitStoreOfComplex(RV.getComplexVal(),
                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
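///
/// For example, GCC accepts both of these:
///   void f(void) { return g(); }   // g() returns void
///   int h(void)  { return; }       // operand missing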
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (RV == 0) {
    // Do nothing (the return value is left uninitialized).
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV,
                     MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
    }
  }

  ++NumReturnExprs;
  if (RV == 0 || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
       I != E; ++I)
    EmitDecl(**I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  JumpDest Block = BreakContinueStack.back().BreakBlock;
  EmitBranchThroughCleanup(Block);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  JumpDest Block = BreakContinueStack.back().ContinueBlock;
  EmitBranchThroughCleanup(Block);
}

/// EmitCaseStmtRange - If the case statement range is not too big, then add
/// multiple cases to the switch instruction, one for each value within the
/// range.  If the range is too big, then emit an "if" condition check instead.
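///
/// For example, the GNU range extension "case 1 ... 3:" is added to the
/// switch as the three individual cases 1, 2 and 3, while a wide range like
/// "case 1 ... 100000:" is emitted as an explicit range check instead.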
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case.  We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big.  Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block).  The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
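  // A single unsigned comparison covers the whole range: for "case 3 ... 10",
  // Diff = Cond - 3 and the case is taken iff Diff ule 7.  Values below 3
  // wrap around to large unsigned numbers and correctly fail the test.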
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
  Builder.CreateCondBr(Cond, CaseDest, FalseDest);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', and if there was no
  // fallthrough, try not to emit an empty block.
  if ((CGM.getCodeGenOpts().OptimizationLevel > 0) &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  EmitBlock(createBasicBlock("sw.bb"));
  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == 0) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");
  EmitBlock(DefaultBlock);
  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                     SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (S == 0)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for,
  // then we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (Case == 0 && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either (1) that the statement doesn't
          // have the case and is skippable, or (2) that it does contain the
          // case value and also contains the break to exit the switch.  In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = 0;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant.  See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                     SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C) {
  // First step, find the switch case that is being branched to.  We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = 0;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case.  Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (Case == 0) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
    if (DefaultCase == 0)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it.  This can fail for a variety of reasons.  Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext())) {
      RunCleanupsScope ExecutedScope(*this);

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = 0;

      // Okay, we can dead code eliminate everything except this case.  Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement.  We also need to create a default block now so that
  // explicit case ranges tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit.  If BreakContinueStack is
  // non-empty, then reuse the last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);

  SwitchInsn = SavedSwitchInsn;
  CaseRangeBlock = SavedCRBlock;
}

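/// SimplifyConstraint - Convert a GCC inline-asm constraint into the form
/// LLVM expects: modifiers like '=' and '+' are dropped (they are encoded
/// separately), ',' between multiple alternatives becomes '|', 'g' expands
/// to "imr", and a symbolic operand name like "[foo]" is resolved to its
/// operand index.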
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these.
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alternative constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint,
                                               &(*OutCons)[0],
                                               OutCons->size(), Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
/// as using a particular register add that as a constraint that will be used
/// in this asm stmt.
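///
/// For example, given
///   register int val asm("eax");
///   asm("..." : "=r"(val));
/// the "r" constraint on the output is replaced by "{eax}".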
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  return "{" + Register.str() + "}";
}

llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
      Arg = EmitLoadOfLValue(InputValue).getScalarVal();
    } else {
      llvm::Type *Ty = ConvertType(InputType);
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
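      // A small aggregate whose size is a power of two no wider than 64 bits
      // can be loaded as a single integer and passed in a register instead of
      // being passed indirectly through memory.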
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
                                                       Ty));
      } else {
        Arg = InputValue.getAddress();
        ConstraintStr += '*';
      }
    }
  } else {
    Arg = InputValue.getAddress();
    ConstraintStr += '*';
  }

  return Arg;
}

llvm::Value*
CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
                              const Expr *InputExpr,
                              std::string &ConstraintStr) {
  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);

  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr);
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction.  The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Value *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
                                        Str->getLocStart().getRawEncoding()));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
                                                      CGF.getTarget());
      Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
                                            LineLoc.getRawEncoding()));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

EmitAsmStmt(const AsmStmt & S)1480 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1481 // Assemble the final asm string.
1482 std::string AsmString = S.generateAsmString(getContext());
1483
1484 // Get all the output and input constraints together.
1485 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1486 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1487
1488 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1489 StringRef Name;
1490 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1491 Name = GAS->getOutputName(i);
1492 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1493 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1494 assert(IsValid && "Failed to parse output constraint");
1495 OutputConstraintInfos.push_back(Info);
1496 }
1497
1498 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1499 StringRef Name;
1500 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1501 Name = GAS->getInputName(i);
1502 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1503 bool IsValid =
1504 getTarget().validateInputConstraint(OutputConstraintInfos.data(),
1505 S.getNumOutputs(), Info);
1506 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1507 InputConstraintInfos.push_back(Info);
1508 }
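
  // For instance (an illustrative sketch): asm("" : "=r"(a) : "0"(b)) yields
  // one output ConstraintInfo for "=r" and one input ConstraintInfo marked as
  // tied to operand 0, which links the input "b" to the output "a".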

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value*> Args;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget());

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S);

    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value.  If this is a memory result, return the value by-reference.
    if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(OutExpr->getType());
      ResultRegDests.push_back(Dest);
      ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
      ResultTruncRegTypes.push_back(ResultRegTypes.back());

      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
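
      // Sketch of the tied-size rule above: for asm("" : "=r"(Short) : "0"(Int))
      // the asm call is given result type i32 (the larger tied input type), and
      // the value is truncated back to i16 in the result loop at the end of
      // this function.
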
      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
          << OutExpr->getType() << OutputConstraint;
      }
    } else {
      ArgTypes.push_back(Dest.getAddress()->getType());
      Args.push_back(Dest.getAddress());
      Constraints += "=*";
      Constraints += OutputConstraint;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints);

      if (llvm::Type* AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                 Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Tie the input to the output with a matching-operand number when a
      // register is allowed; memory inouts repeat the constraint itself.
      if (Info.allowsRegister())
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }
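
  // Note on read-write ("+") outputs handled above (rough sketch): "+r"(v)
  // contributes "=r" to Constraints here, plus a hidden input appended later
  // whose constraint is the output's index (e.g. "0"), tying them together.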

  unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint =
      AddVariableConstraints(InputConstraint,
                             *InputExpr->IgnoreParenNoopCasts(getContext()),
                             getTarget(), CGM, S);

    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output.  The LLVM backend wants to see
    // the input and output of a matching constraint be the same size.  Note
    // that GCC does not define what the top bits are here.  We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
    }
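
    // E.g. (sketch): for asm("" : "=r"(L) : "0"(I)) with 64-bit L and 32-bit
    // I, the i32 input is zero-extended to i64 here so both sides of the tied
    // constraint have the same LLVM type.
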
    if (llvm::Type* AdjTy =
          getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
                                               Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
        << InputExpr->getType() << InputConstraint;

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Append the "input" part of inout constraints last.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber != "memory" && Clobber != "cc")
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);

    if (i != 0 || NumConstraints != 0)
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }
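
  // For example (sketch): asm volatile("" : : : "memory", "cc") appends
  // "~{memory},~{cc}" here; register clobbers are normalized first so that
  // alternate spellings reach LLVM in canonical form.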

  // Add machine specific clobbers.
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
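
  // Sketch: a single register output of C type int gives ResultType == i32;
  // two outputs (say int and short) are packed into the anonymous struct
  // { i32, i16 } and unpacked with extractvalue after the call.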

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA =
    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                         /* IsAlignStack */ false, AsmDialect);
  llvm::CallInst *Result = Builder.CreateCall(IA, Args);
  Result->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::NoUnwind);
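
  // The emitted call looks roughly like (illustrative only):
  //   %res = call i32 asm sideeffect "...", "=r,r,~{cc}"(i32 %in)
  // with the NoUnwind attribute added because inline asm is not expected to
  // unwind the stack.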

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.  FIXME: Handle metadata for MS-style inline asms.
  if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
                                                   *this));

  // Extract all of the register value results from the asm.
  std::vector<llvm::Value*> RegResults;
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }

  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
      llvm::Type *TruncTy = ResultTruncRegTypes[i];

      // Truncate the integer result to the right size; note that TruncTy can
      // also be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
  }
}
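
// End-to-end sketch of EmitAsmStmt (illustrative, not from a test): for
//   int Out, In = 1;
//   asm("movl %1, %0" : "=r"(Out) : "r"(In));
// the emitted IR is roughly
//   %1 = call i32 asm "movl $1, $0", "=r,r"(i32 %0), !srcloc !N
//   store i32 %1, i32* %Out
// where generateAsmString has rewritten %-operands into $-operands.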

static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = CGF.getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(
      CGF.CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
                                           E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    CGF.EmitInitializerForField(*CurField, LV, *I, ArrayRef<VarDecl *>());
  }

  return SlotLV;
}
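
// Sketch of the capture record (hedged; "use" is a placeholder): for a
// captured region over the locals x and y, e.g.
//   int x; float y;
//   #pragma clang __debug captured
//   { use(x, y); }
// the frontend builds a record whose fields refer to the captures, and the
// loop above fills a stack temporary "agg.captured" field by field.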

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  LValue CapStruct = InitCapturedStruct(*this, S);

  // Emit the CapturedDecl in a fresh CodeGenFunction so the outlined helper
  // does not disturb the state of the enclosing function.
  CodeGenFunction CGF(CGM, true);
  CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(CD, RD);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getAddress());

  return F;
}
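
// Rough illustration of the net effect in the enclosing function:
//   %agg.captured = alloca %struct.anon
//   ; ...stores of the captured variables into %agg.captured...
//   call void @__captured_stmt(%struct.anon* %agg.captured)
// where "__captured_stmt" is the default helper name from CGCapturedStmtInfo.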

/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedDecl *CD,
                                              const RecordDecl *RD) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");

  // Check if we should generate debug info for this function.
  maybeInitializeDebugInfo();

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  FunctionType::ExtInfo ExtInfo;
  const CGFunctionInfo &FuncInfo =
    CGM.getTypes().arrangeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
                                              /*IsVariadic=*/false);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getBody()->getLocStart());

  // Set the context parameter in CapturedStmtInfo.
  llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
  assert(DeclPtr && "missing context parameter for CapturedStmt");
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
    LValue ThisLValue = EmitLValueForField(LV, FD);

    CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
  }

  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}
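
// Sketch of the generated helper (hedged): an internal function such as
//   define internal void @__captured_stmt(%struct.anon* %__context)
// whose prologue loads the context parameter; captured values, including
// 'this' when present, are then read out of %__context as record fields.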