//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

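  // Walk the inheritance path from the derived class towards the base,
  // accumulating each step's offset within its immediately enclosing class.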
  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                            CastExpr::path_const_iterator PathBegin,
                                            CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the virtual base.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                          CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CodeGenVTables::needsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CodeGenVTables::needsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CodeGenVTables::needsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
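    // Base case of the recursion: every array index variable has been
    // consumed, so emit the initialization of a single element.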
    LValue LV = LHS;
    { // Scope for Cleanups.
      CodeGenFunction::RunCleanupsScope Cleanups(CGF);

      if (ArrayIndexVar) {
        // If we have an array index variable, load it and use it as an offset.
        // Then, increment the value.
        llvm::Value *Dest = LHS.getAddress();
        llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
        Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
        llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
        Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
        CGF.Builder.CreateStore(Next, ArrayIndexVar);

        // Update the LValue.
        LV.setAddress(Dest);
        CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
        LV.setAlignment(std::min(Align, LV.getAlignment()));
      }

      switch (CGF.getEvaluationKind(T)) {
      case TEK_Scalar:
        CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
        break;
      case TEK_Complex:
        CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
        break;
      case TEK_Aggregate: {
        AggValueSlot Slot =
          AggValueSlot::forLValue(LV,
                                  AggValueSlot::IsDestructed,
                                  AggValueSlot::DoesNotNeedGCBarriers,
                                  AggValueSlot::IsNotAliased);

        CGF.EmitAggExpr(Init, Slot);
        break;
      }
      }
    }

    // Now, outside of the initializer cleanup scope, destroy the backing array
    // for a std::initializer_list member.
    CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  {
    CodeGenFunction::RunCleanupsScope Cleanups(CGF);

    // Inside the loop body recurse to emit the inner loop or, eventually, the
    // constructor call.
    EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                             Array->getElementType(), ArrayIndexes, Index + 1);
  }

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // Non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isImplicitlyDefined() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                              ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getContext().getTargetInfo().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

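  // Remember the current cleanup depth; everything the prologue pushes is
  // popped again once the body has been emitted.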
  EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  PopCleanupBlocks(CleanupDepth);

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
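      // The memcpy covers everything from the start of the first field to the
      // end of the last one; the CharWidth - 1 term rounds the bit count up to
      // a whole number of chars.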
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. it hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
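      // CreateMemCpy wants i8* operands, so cast both pointers to i8*,
      // preserving their original address spaces.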
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      assert(F->getFieldIndex() == LastAddedFieldIndex + 1 &&
             "Cannot aggregate non-contiguous fields.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isImplicitlyDefined() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[i], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
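      // The aggregated initializers bypass EmitInitializerForField, so push
      // the exception-path destructor cleanups for the copied fields here.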
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl* getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
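      // An implicit assignment operator has exactly two arguments here:
      // 'this' and the source object.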
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
          CGF.EmitStmt(AggregatedStmts[i]);
        reset();
      }

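      // If we took the branch above, reset() cleared FirstField, so this
      // emitMemcpy() call bails out immediately and emits nothing extra.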
      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB = CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

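  // Virtual bases are destroyed only by the most-derived class's destructor,
  // so they only affect the answer when we are looking at that class.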
  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody &&
        CGM.getContext().getTargetInfo().getCXXABI().hasDestructorVariants()) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
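      // The implicit parameter is non-zero when operator delete should be
      // called, so branch straight to the continuation block when it is null.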
1359 llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
1360 llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
1361 llvm::Value *ShouldCallDelete
1362 = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
1363 CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);
1364
1365 CGF.EmitBlock(callDeleteBB);
1366 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1367 const CXXRecordDecl *ClassDecl = Dtor->getParent();
1368 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1369 CGF.getContext().getTagDeclType(ClassDecl));
1370 CGF.Builder.CreateBr(continueBB);
1371
1372 CGF.EmitBlock(continueBB);
1373 }
1374 };
1375
1376 class DestroyField : public EHScopeStack::Cleanup {
1377 const FieldDecl *field;
1378 CodeGenFunction::Destroyer *destroyer;
1379 bool useEHCleanupForArray;
1380
1381 public:
DestroyField(const FieldDecl * field,CodeGenFunction::Destroyer * destroyer,bool useEHCleanupForArray)1382 DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
1383 bool useEHCleanupForArray)
1384 : field(field), destroyer(destroyer),
1385 useEHCleanupForArray(useEHCleanupForArray) {}
1386
Emit(CodeGenFunction & CGF,Flags flags)1387 void Emit(CodeGenFunction &CGF, Flags flags) {
1388 // Find the address of the field.
1389 llvm::Value *thisValue = CGF.LoadCXXThis();
1390 QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
1391 LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
1392 LValue LV = CGF.EmitLValueForField(ThisLV, field);
1393 assert(LV.isSimple());
1394
1395 CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
1396 flags.isForNormalCleanup() && useEHCleanupForArray);
1397 }
1398 };
1399 }
1400
1401 /// EmitDtorEpilogue - Emit all code that comes at the end of class's
1402 /// destructor. This is to call destructors on members and base classes
1403 /// in reverse order of their construction.
EnterDtorCleanups(const CXXDestructorDecl * DD,CXXDtorType DtorType)1404 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
1405 CXXDtorType DtorType) {
1406 assert(!DD->isTrivial() &&
1407 "Should not emit dtor epilogue for trivial dtor!");
1408
1409 // The deleting-destructor phase just needs to call the appropriate
1410 // operator delete that Sema picked up.
1411 if (DtorType == Dtor_Deleting) {
1412 assert(DD->getOperatorDelete() &&
1413 "operator delete missing - EmitDtorEpilogue");
1414 if (CXXStructorImplicitParamValue) {
1415 // If there is an implicit param to the deleting dtor, it's a boolean
1416 // telling whether we should call delete at the end of the dtor.
1417 EHStack.pushCleanup<CallDtorDeleteConditional>(
1418 NormalAndEHCleanup, CXXStructorImplicitParamValue);
1419 } else {
1420 EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
1421 }
1422 return;
1423 }
1424
1425 const CXXRecordDecl *ClassDecl = DD->getParent();
1426
1427 // Unions have no bases and do not call field destructors.
1428 if (ClassDecl->isUnion())
1429 return;
1430
1431 // The complete-destructor phase just destructs all the virtual bases.
1432 if (DtorType == Dtor_Complete) {
1433
1434 // We push them in the forward order so that they'll be popped in
1435 // the reverse order.
1436 for (CXXRecordDecl::base_class_const_iterator I =
1437 ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
1438 I != E; ++I) {
1439 const CXXBaseSpecifier &Base = *I;
1440 CXXRecordDecl *BaseClassDecl
1441 = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
1442
1443 // Ignore trivial destructors.
1444 if (BaseClassDecl->hasTrivialDestructor())
1445 continue;
1446
1447 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1448 BaseClassDecl,
1449 /*BaseIsVirtual*/ true);
1450 }
1451
1452 return;
1453 }
1454
1455 assert(DtorType == Dtor_Base);
1456
1457 // Destroy non-virtual bases.
1458 for (CXXRecordDecl::base_class_const_iterator I =
1459 ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
1460 const CXXBaseSpecifier &Base = *I;
1461
1462 // Ignore virtual bases.
1463 if (Base.isVirtual())
1464 continue;
1465
1466 CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
1467
1468 // Ignore trivial destructors.
1469 if (BaseClassDecl->hasTrivialDestructor())
1470 continue;
1471
1472 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1473 BaseClassDecl,
1474 /*BaseIsVirtual*/ false);
1475 }
1476
1477 // Destroy direct fields.
1478 SmallVector<const FieldDecl *, 16> FieldDecls;
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
         E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
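///
/// For example, this overload handles arrays whose bound is known
/// statically, such as a local 'A arr[10];' or an array member being built
/// by a constructor.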
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                            CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                            CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
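  //
  // For example, 'new A[n]' with n == 0 must allocate and construct
  // nothing, and 'A arr[0]' is accepted as a GNU extension.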
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors point at the loop for now; the "is zero" successor is
    // patched to branch past the loop once the continuation block exists
    // (see setSuccessor below).
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.
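  //
  // For example, given 'struct A { A(const B &b = B()); };' and 'new A[2]',
  // the B temporary built for element 0's default argument is destroyed
  // before element 1 is constructed; the per-element cleanup scope below
  // enforces exactly that sequencing.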

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {

  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
    // If debug info for this class has not been emitted then this is the
    // right time to do so.
    const CXXRecordDecl *Parent = D->getParent();
    DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
                              Parent->getLocation());
  }

  // If this is a trivial constructor, just emit what's needed.
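  // For a trivial default constructor there is nothing to emit at all, and
  // a trivial copy or move constructor reduces to a plain aggregate copy of
  // the source object.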
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // Non-trivial constructors are handled in an ABI-specific manner.
  CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
                                      Delegating, This, ArgBeg, ArgEnd);
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                                llvm::Value *This,
                                                llvm::Value *Src,
                                                CallExpr::const_arg_iterator ArgBeg,
                                                CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over the first argument (Src).
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
         E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
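  // Only base-object constructors (and destructors) of classes with virtual
  // bases expect a VTT parameter: the table of construction vtables used
  // while the object's dynamic type is still the base class under
  // construction.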
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CodeGenVTables::needsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    EmitDelegateCallArg(DelegateArgs, param);
  }

  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
           ReturnValueSlot(), DelegateArgs, Ctor);
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());
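  // For example, given 'struct S { S() : S(0) {} S(int); };', the target
  // constructor S(int) fully initializes *this; if the delegating
  // constructor's body throws after that, the destructor must run, which is
  // what the cleanup pushed below is for.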

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  llvm::Value *VTT = GetVTTParameter(GlobalDecl(DD, Type),
                                     ForVirtualBase, Delegating);
  llvm::Value *Callee = 0;
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
                                                 DD->getParent());

  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
                    0, 0);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

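// The offset of a virtual base within the most-derived object is not known
// statically, so it is loaded from the vtable: the Itanium ABI keeps each
// virtual base offset at a fixed (negative) displacement from the vtable's
// address point.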
llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
                                           const CXXRecordDecl *ClassDecl,
                                           const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
  CharUnits VBaseOffsetOffset =
    CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                               "vbase.offset.ptr");
  llvm::Type *PtrDiffTy =
    ConvertType(getContext().getPointerDiffType());

  VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
                                         PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");

  return VBaseOffset;
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         llvm::Constant *VTable,
                                         const CXXRecordDecl *VTableClass) {
  const CXXRecordDecl *RD = Base.getBase();

  // Compute the address point.
  llvm::Value *VTableAddressPoint;

  // Check if we need to use a vtable from the VTT.
  if (CodeGenVTables::needsVTTParameter(CurGD) &&
      (RD->getNumVBases() || NearestVBase)) {
    // Get the secondary vpointer index.
    uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

    // Load the VTT.
    llvm::Value *VTT = LoadCXXVTT();
    if (VirtualPointerIndex)
      VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

    // And load the address point from the VTT.
    VTableAddressPoint = Builder.CreateLoad(VTT);
  } else {
    uint64_t AddressPoint =
      CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
    VTableAddressPoint =
      Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
  }

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          llvm::Constant *VTable,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy &VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTable, VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTable, VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Get the VTable.
  llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false,
                           VTable, RD, VBases);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
/// function call on the given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
                                              const CXXMethodDecl *MD) {
  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable itself has record type (it is a complete object,
      // not a pointer or reference), its dynamic type is known exactly and
      // we can devirtualize.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;
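  // (In both cases, e.g. 'A().f()', the temporary's dynamic type is known to
  // be exactly A.)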

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static bool UseVirtualCall(ASTContext &Context,
                           const CXXOperatorCallExpr *CE,
                           const CXXMethodDecl *MD) {
  if (!MD->isVirtual())
    return false;

  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return true;

  return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (UseVirtualCall(getContext(), E, MD))
    return BuildVirtualCall(MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
                                                 CallArgList &callArgs) {
  // Look up the call operator.
  DeclarationName operatorName
    = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
  CXXMethodDecl *callOperator =
    cast<CXXMethodDecl>(lambda->lookup(operatorName).front());

  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
         E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args);
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

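  // This delegating invoker is only synthesized for captureless lambdas,
  // whose call operator never inspects its 'this' pointer, so an undef
  // object argument suffices here.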
  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr =
    llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
         E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

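// For example, this emits the body of the static invoker produced when a
// captureless lambda is converted to a function pointer:
//   void (*fp)(int) = [](int x) { /* ... */ };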
void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}