//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
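  // Walk the path from the derived class towards the base, accumulating the
  // statically known offset of each non-virtual base within its direct parent.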
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
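///
/// For illustration (a hypothetical hierarchy, not drawn from this file):
/// given 'struct Base { int b; }; struct Derived : Base { int d; };',
/// constructing or destroying the Base subobject of a Derived object needs
/// the address 'this' adjusted by Base's offset in Derived's record layout,
/// which is what this helper computes.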
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                  const CXXRecordDecl *Derived,
                                                  const CXXRecordDecl *Base,
                                                  bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the base (virtual or direct) within Derived.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
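  // For example (hypothetically), for 'struct D : virtual B { };' a D* -> B*
  // conversion path is just the single virtual step down to B.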
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
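  // For instance, if the derived class were 'struct D final : virtual B { }',
  // B's offset within D is a compile-time constant, so no vtable load is
  // needed to locate the virtual base.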
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

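/// GetVTTParameter - Return the VTT (virtual table table) address to pass to
/// a base-subobject constructor or destructor call, or null if the callee
/// does not take a VTT parameter.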
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code? If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

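/// EmitAggMemberInitializer - Emit the initializer for a member, looping over
/// the elements when the member is an array: one loop is emitted per entry in
/// ArrayIndexes, and the innermost recursive call performs the actual scalar,
/// complex, or aggregate element initialization.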
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

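/// EmitMemberInitializer - Emit a single non-static data member initializer
/// from a constructor's initializer list, including a fast path that copies
/// arrays of trivially copyable elements directly in defaulted copy/move
/// constructors.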
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                            ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers. This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
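  /// FieldMemcpyizer - Coalesces consecutive initializations or assignments
  /// of trivially copyable fields into a single memcpy from the source object
  /// into 'this'. Subclasses feed it candidate fields via addMemcpyableField()
  /// and flush the accumulated run with emitMemcpy().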
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. it hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

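  /// ConstructorMemcpyizer - FieldMemcpyizer subclass that aggregates the
  /// member initializers of a defaulted copy or move constructor into memcpys
  /// where that is legal.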
  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[i], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

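  /// AssignmentMemcpyizer - FieldMemcpyizer subclass that aggregates the
  /// field-by-field assignments in the body of a defaulted copy or move
  /// assignment operator into memcpys.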
  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
          CGF.EmitStmt(AggregatedStmts[i]);
        reset();
      }
      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// EnterDtorCleanups - Push all the cleanups that run at the end of a class's
/// destructor: the destructor calls for members and base classes, which run
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EmitDtorEpilogue");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
         ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  SmallVector<const FieldDecl *, 16> FieldDecls;
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

1498 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1499 /// constructor for each of several members of an array.
1500 ///
1501 /// \param ctor the constructor to call for each element
1502 /// \param arrayType the type of the array to initialize
1503 /// \param arrayBegin an arrayType*
1504 /// \param zeroInitialize true if each element should be
1505 /// zero-initialized before it is constructed
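///
/// As an illustration, this overload is reached for an automatic array such
/// as 'A a[10];', where the element constructor is run on each of the ten
/// elements in order.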
1506 void
1507 CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1508 const ConstantArrayType *arrayType,
1509 llvm::Value *arrayBegin,
1510 CallExpr::const_arg_iterator argBegin,
1511 CallExpr::const_arg_iterator argEnd,
1512 bool zeroInitialize) {
1513 QualType elementType;
1514 llvm::Value *numElements =
1515 emitArrayLength(arrayType, elementType, arrayBegin);
1516
1517 EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
1518 argBegin, argEnd, zeroInitialize);
1519 }
1520
1521 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1522 /// constructor for each of several members of an array.
1523 ///
1524 /// \param ctor the constructor to call for each element
1525 /// \param numElements the number of elements in the array;
1526 /// may be zero
1527 /// \param arrayBegin a T*, where T is the type constructed by ctor
1528 /// \param zeroInitialize true if each element should be
1529 /// zero-initialized before it is constructed
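///
/// As an illustration, this overload is reached for 'new A[n]', where the
/// element count is only known at run time and may be zero.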
1530 void
1531 CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1532 llvm::Value *numElements,
1533 llvm::Value *arrayBegin,
1534 CallExpr::const_arg_iterator argBegin,
1535 CallExpr::const_arg_iterator argEnd,
1536 bool zeroInitialize) {
1537
1538 // It's legal for numElements to be zero. This can happen both
1539 // dynamically, because x can be zero in 'new A[x]', and statically,
1540 // because of GCC extensions that permit zero-length arrays. There
1541 // are probably legitimate places where we could assume that this
1542 // doesn't happen, but it's not clear that it's worth it.
1543 llvm::BranchInst *zeroCheckBranch = 0;
1544
1545 // Optimize for a constant count.
1546 llvm::ConstantInt *constantCount
1547 = dyn_cast<llvm::ConstantInt>(numElements);
1548 if (constantCount) {
1549 // Just skip out if the constant count is zero.
1550 if (constantCount->isZero()) return;
1551
1552 // Otherwise, emit the check.
1553 } else {
1554 llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
1555 llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
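// Note: both successors of this branch temporarily point at loopBB; the
// taken ("isempty") successor is redirected to the continuation block once
// that block exists (see the setSuccessor call below).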
1556 zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
1557 EmitBlock(loopBB);
1558 }
1559
1560 // Find the end of the array.
1561 llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
1562 "arrayctor.end");
1563
1564 // Enter the loop, setting up a phi for the current location to initialize.
1565 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1566 llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
1567 EmitBlock(loopBB);
1568 llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
1569 "arrayctor.cur");
1570 cur->addIncoming(arrayBegin, entryBB);
1571
1572 // Inside the loop body, emit the constructor call on the array element.
1573
1574 QualType type = getContext().getTypeDeclType(ctor->getParent());
1575
1576 // Zero initialize the storage, if requested.
1577 if (zeroInitialize)
1578 EmitNullInitialization(cur, type);
1579
1580 // C++ [class.temporary]p4:
1581 // There are two contexts in which temporaries are destroyed at a different
1582 // point than the end of the full-expression. The first context is when a
1583 // default constructor is called to initialize an element of an array.
1584 // If the constructor has one or more default arguments, the destruction of
1585 // every temporary created in a default argument expression is sequenced
1586 // before the construction of the next array element, if any.
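//
// For example (illustrative): for 'A a[2];' where A's default constructor
// takes a defaulted argument 'const T &t = T()', the temporary T created
// for a[0] is destroyed before the construction of a[1] begins.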
1587
1588 {
1589 RunCleanupsScope Scope(*this);
1590
1591 // Evaluate the constructor and its arguments in a regular
1592 // partial-destroy cleanup.
1593 if (getLangOpts().Exceptions &&
1594 !ctor->getParent()->hasTrivialDestructor()) {
1595 Destroyer *destroyer = destroyCXXObject;
1596 pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
1597 }
1598
1599 EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
1600 /*Delegating=*/false, cur, argBegin, argEnd);
1601 }
1602
1603 // Go to the next element.
1604 llvm::Value *next =
1605 Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
1606 "arrayctor.next");
1607 cur->addIncoming(next, Builder.GetInsertBlock());
1608
1609 // Check whether that's the end of the loop.
1610 llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
1611 llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
1612 Builder.CreateCondBr(done, contBB, loopBB);
1613
1614 // Patch the earlier check to skip over the loop.
1615 if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
1616
1617 EmitBlock(contBB);
1618 }
1619
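/// destroyCXXObject - A CodeGenFunction::Destroyer that runs the
/// complete-object destructor on the (non-trivially destructible) object
/// at 'addr'.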
1620 void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
1621 llvm::Value *addr,
1622 QualType type) {
1623 const RecordType *rtype = type->castAs<RecordType>();
1624 const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
1625 const CXXDestructorDecl *dtor = record->getDestructor();
1626 assert(!dtor->isTrivial());
1627 CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
1628 /*Delegating=*/false, addr);
1629 }
1630
1631 void
1632 CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
1633 CXXCtorType Type, bool ForVirtualBase,
1634 bool Delegating,
1635 llvm::Value *This,
1636 CallExpr::const_arg_iterator ArgBeg,
1637 CallExpr::const_arg_iterator ArgEnd) {
1638 // If this is a trivial constructor, just emit what's needed.
1639 if (D->isTrivial()) {
1640 if (ArgBeg == ArgEnd) {
1641 // Trivial default constructor, no codegen required.
1642 assert(D->isDefaultConstructor() &&
1643 "trivial 0-arg ctor not a default ctor");
1644 return;
1645 }
1646
1647 assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
1648 assert(D->isCopyOrMoveConstructor() &&
1649 "trivial 1-arg ctor not a copy/move ctor");
1650
1651 const Expr *E = (*ArgBeg);
1652 QualType Ty = E->getType();
1653 llvm::Value *Src = EmitLValue(E).getAddress();
1654 EmitAggregateCopy(This, Src, Ty);
1655 return;
1656 }
1657
1658 // Non-trivial constructors are handled in an ABI-specific manner.
1659 CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
1660 Delegating, This, ArgBeg, ArgEnd);
1661 }
1662
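/// EmitSynthesizedCXXCopyCtorCall - Emit a call to the copy constructor 'D',
/// copying from 'Src' into 'This', on behalf of a synthesized copy operation.
/// Trivial copy/move constructors are lowered directly to an aggregate copy.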
1663 void
1664 CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1665 llvm::Value *This, llvm::Value *Src,
1666 CallExpr::const_arg_iterator ArgBeg,
1667 CallExpr::const_arg_iterator ArgEnd) {
1668 if (D->isTrivial()) {
1669 assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
1670 assert(D->isCopyOrMoveConstructor() &&
1671 "trivial 1-arg ctor not a copy/move ctor");
1672 EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
1673 return;
1674 }
1675 llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
1676 clang::Ctor_Complete);
1677 assert(D->isInstance() &&
1678 "Trying to emit a member call expr on a static method!");
1679
1680 const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
1681
1682 CallArgList Args;
1683
1684 // Push the this ptr.
1685 Args.add(RValue::get(This), D->getThisType(getContext()));
1686
1687
1688 // Push the src ptr.
1689 QualType QT = *(FPT->arg_type_begin());
1690 llvm::Type *t = CGM.getTypes().ConvertType(QT);
1691 Src = Builder.CreateBitCast(Src, t);
1692 Args.add(RValue::get(Src), QT);
1693
1694 // Skip over first argument (Src).
1695 ++ArgBeg;
1696 CallExpr::const_arg_iterator Arg = ArgBeg;
1697 for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
1698 E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
1699 assert(Arg != ArgEnd && "Running over edge of argument list!");
1700 EmitCallArg(Args, *Arg, *I);
1701 }
1702 // Either we've emitted all the call args, or we have a call to a
1703 // variadic function.
1704 assert((Arg == ArgEnd || FPT->isVariadic()) &&
1705 "Extra arguments in non-variadic function!");
1706 // If we still have any arguments, emit them using the type of the argument.
1707 for (; Arg != ArgEnd; ++Arg) {
1708 QualType ArgType = Arg->getType();
1709 EmitCallArg(Args, *Arg, ArgType);
1710 }
1711
1712 EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
1713 Callee, ReturnValueSlot(), Args, D);
1714 }
1715
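/// EmitDelegateCXXConstructorCall - Forward the current constructor's
/// incoming arguments ('this', the VTT if present, and the explicit
/// parameters) to another variant of the same constructor, e.g. from the
/// complete-object to the base-object constructor.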
1716 void
1717 CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1718 CXXCtorType CtorType,
1719 const FunctionArgList &Args) {
1720 CallArgList DelegateArgs;
1721
1722 FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
1723 assert(I != E && "no parameters to constructor");
1724
1725 // this
1726 DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
1727 ++I;
1728
1729 // vtt
1730 if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
1731 /*ForVirtualBase=*/false,
1732 /*Delegating=*/true)) {
1733 QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
1734 DelegateArgs.add(RValue::get(VTT), VoidPP);
1735
1736 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
1737 assert(I != E && "cannot skip vtt parameter, already done with args");
1738 assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
1739 ++I;
1740 }
1741 }
1742
1743 // Explicit arguments.
1744 for (; I != E; ++I) {
1745 const VarDecl *param = *I;
1746 EmitDelegateCallArg(DelegateArgs, param);
1747 }
1748
1749 llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
1750 EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
1751 Callee, ReturnValueSlot(), DelegateArgs, Ctor);
1752 }
1753
1754 namespace {
1755 struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
1756 const CXXDestructorDecl *Dtor;
1757 llvm::Value *Addr;
1758 CXXDtorType Type;
1759
1760 CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
1761 CXXDtorType Type)
1762 : Dtor(D), Addr(Addr), Type(Type) {}
1763
1764 void Emit(CodeGenFunction &CGF, Flags flags) {
1765 CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
1766 /*Delegating=*/true, Addr);
1767 }
1768 };
1769 }
1770
1771 void
1772 CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1773 const FunctionArgList &Args) {
1774 assert(Ctor->isDelegatingConstructor());
1775
1776 llvm::Value *ThisPtr = LoadCXXThis();
1777
1778 QualType Ty = getContext().getTagDeclType(Ctor->getParent());
1779 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1780 AggValueSlot AggSlot =
1781 AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
1782 AggValueSlot::IsDestructed,
1783 AggValueSlot::DoesNotNeedGCBarriers,
1784 AggValueSlot::IsNotAliased);
1785
1786 EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
1787
1788 const CXXRecordDecl *ClassDecl = Ctor->getParent();
1789 if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
1790 CXXDtorType Type =
1791 CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
1792
1793 EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
1794 ClassDecl->getDestructor(),
1795 ThisPtr, Type);
1796 }
1797 }
1798
1799 void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
1800 CXXDtorType Type,
1801 bool ForVirtualBase,
1802 bool Delegating,
1803 llvm::Value *This) {
1804 llvm::Value *VTT = GetVTTParameter(GlobalDecl(DD, Type),
1805 ForVirtualBase, Delegating);
1806 llvm::Value *Callee = 0;
1807 if (getLangOpts().AppleKext)
1808 Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
1809 DD->getParent());
1810
1811 if (!Callee)
1812 Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
1813
1814 // FIXME: Provide a source location here.
1815 EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
1816 VTT, getContext().getPointerType(getContext().VoidPtrTy),
1817 0, 0);
1818 }
1819
1820 namespace {
1821 struct CallLocalDtor : EHScopeStack::Cleanup {
1822 const CXXDestructorDecl *Dtor;
1823 llvm::Value *Addr;
1824
1825 CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
1826 : Dtor(D), Addr(Addr) {}
1827
1828 void Emit(CodeGenFunction &CGF, Flags flags) {
1829 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1830 /*ForVirtualBase=*/false,
1831 /*Delegating=*/false, Addr);
1832 }
1833 };
1834 }
1835
1836 void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
1837 llvm::Value *Addr) {
1838 EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
1839 }
1840
1841 void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
1842 CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
1843 if (!ClassDecl) return;
1844 if (ClassDecl->hasTrivialDestructor()) return;
1845
1846 const CXXDestructorDecl *D = ClassDecl->getDestructor();
1847 assert(D && D->isUsed() && "destructor not marked as used!");
1848 PushDestructorCleanup(D, Addr);
1849 }
1850
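/// InitializeVTablePointer - Store the appropriate vtable address point into
/// the vptr of the given base subobject. When the current constructor or
/// destructor requires a VTT (base-object variants of classes with virtual
/// bases), the address point is loaded from the VTT rather than taken
/// directly from the vtable of the class being constructed.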
1851 void
1852 CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
1853 const CXXRecordDecl *NearestVBase,
1854 CharUnits OffsetFromNearestVBase,
1855 llvm::Constant *VTable,
1856 const CXXRecordDecl *VTableClass) {
1857 const CXXRecordDecl *RD = Base.getBase();
1858
1859 // Compute the address point.
1860 llvm::Value *VTableAddressPoint;
1861
1862 bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CurGD);
1863
1864 // Check if we need to use a vtable from the VTT.
1865 if (NeedsVTTParam && (RD->getNumVBases() || NearestVBase)) {
1866 // Get the secondary vpointer index.
1867 uint64_t VirtualPointerIndex =
1868 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1869
1870 // Load the VTT.
1871 llvm::Value *VTT = LoadCXXVTT();
1872 if (VirtualPointerIndex)
1873 VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1874
1875 // And load the address point from the VTT.
1876 VTableAddressPoint = Builder.CreateLoad(VTT);
1877 } else {
1878 uint64_t AddressPoint =
1879 CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
1880 VTableAddressPoint =
1881 Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
1882 }
1883
1884 // Compute where to store the address point.
1885 llvm::Value *VirtualOffset = 0;
1886 CharUnits NonVirtualOffset = CharUnits::Zero();
1887
1888 if (NeedsVTTParam && NearestVBase) {
1889 // We need to use the virtual base offset offset because the virtual base
1890 // might have a different offset in the most derived class.
1891 VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
1892 LoadCXXThis(),
1893 VTableClass,
1894 NearestVBase);
1895 NonVirtualOffset = OffsetFromNearestVBase;
1896 } else {
1897 // We can just use the base offset in the complete class.
1898 NonVirtualOffset = Base.getBaseOffset();
1899 }
1900
1901 // Apply the offsets.
1902 llvm::Value *VTableField = LoadCXXThis();
1903
1904 if (!NonVirtualOffset.isZero() || VirtualOffset)
1905 VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
1906 NonVirtualOffset,
1907 VirtualOffset);
1908
1909 // Finally, store the address point.
1910 llvm::Type *AddressPointPtrTy =
1911 VTableAddressPoint->getType()->getPointerTo();
1912 VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
1913 llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
1914 CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
1915 }
1916
1917 void
1918 CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
1919 const CXXRecordDecl *NearestVBase,
1920 CharUnits OffsetFromNearestVBase,
1921 bool BaseIsNonVirtualPrimaryBase,
1922 llvm::Constant *VTable,
1923 const CXXRecordDecl *VTableClass,
1924 VisitedVirtualBasesSetTy& VBases) {
1925 // If this base is a non-virtual primary base the address point has already
1926 // been set.
1927 if (!BaseIsNonVirtualPrimaryBase) {
1928 // Initialize the vtable pointer for this base.
1929 InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
1930 VTable, VTableClass);
1931 }
1932
1933 const CXXRecordDecl *RD = Base.getBase();
1934
1935 // Traverse bases.
1936 for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
1937 E = RD->bases_end(); I != E; ++I) {
1938 CXXRecordDecl *BaseDecl
1939 = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
1940
1941 // Ignore classes without a vtable.
1942 if (!BaseDecl->isDynamicClass())
1943 continue;
1944
1945 CharUnits BaseOffset;
1946 CharUnits BaseOffsetFromNearestVBase;
1947 bool BaseDeclIsNonVirtualPrimaryBase;
1948
1949 if (I->isVirtual()) {
1950 // Check if we've visited this virtual base before.
1951 if (!VBases.insert(BaseDecl))
1952 continue;
1953
1954 const ASTRecordLayout &Layout =
1955 getContext().getASTRecordLayout(VTableClass);
1956
1957 BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
1958 BaseOffsetFromNearestVBase = CharUnits::Zero();
1959 BaseDeclIsNonVirtualPrimaryBase = false;
1960 } else {
1961 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1962
1963 BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
1964 BaseOffsetFromNearestVBase =
1965 OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
1966 BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
1967 }
1968
1969 InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
1970 I->isVirtual() ? BaseDecl : NearestVBase,
1971 BaseOffsetFromNearestVBase,
1972 BaseDeclIsNonVirtualPrimaryBase,
1973 VTable, VTableClass, VBases);
1974 }
1975 }
1976
1977 void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
1978 // Ignore classes without a vtable.
1979 if (!RD->isDynamicClass())
1980 return;
1981
1982 // Get the VTable.
1983 llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
1984
1985 // Initialize the vtable pointers for this class and all of its bases.
1986 VisitedVirtualBasesSetTy VBases;
1987 InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
1988 /*NearestVBase=*/0,
1989 /*OffsetFromNearestVBase=*/CharUnits::Zero(),
1990 /*BaseIsNonVirtualPrimaryBase=*/false,
1991 VTable, RD, VBases);
1992 }
1993
1994 llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
1995 llvm::Type *Ty) {
1996 llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
1997 llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
1998 CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
1999 return VTable;
2000 }
2001
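/// getMostDerivedClassDecl - Strip parens and derived-to-base / no-op casts
/// from the base expression of a member call to recover its most derived
/// static type; this drives the devirtualization checks below.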
2002 static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
2003 const Expr *E = Base;
2004
2005 while (true) {
2006 E = E->IgnoreParens();
2007 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
2008 if (CE->getCastKind() == CK_DerivedToBase ||
2009 CE->getCastKind() == CK_UncheckedDerivedToBase ||
2010 CE->getCastKind() == CK_NoOp) {
2011 E = CE->getSubExpr();
2012 continue;
2013 }
2014 }
2015
2016 break;
2017 }
2018
2019 QualType DerivedType = E->getType();
2020 if (const PointerType *PTy = DerivedType->getAs<PointerType>())
2021 DerivedType = PTy->getPointeeType();
2022
2023 return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
2024 }
2025
2026 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
2027 // quite what we want.
2028 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
2029 while (true) {
2030 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
2031 E = PE->getSubExpr();
2032 continue;
2033 }
2034
2035 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
2036 if (CE->getCastKind() == CK_NoOp) {
2037 E = CE->getSubExpr();
2038 continue;
2039 }
2040 }
2041 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
2042 if (UO->getOpcode() == UO_Extension) {
2043 E = UO->getSubExpr();
2044 continue;
2045 }
2046 }
2047 return E;
2048 }
2049 }
2050
2051 /// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
2052 /// function call on the given expr can be devirtualized.
2053 static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
2054 const CXXMethodDecl *MD) {
2055 // If the most derived class is marked final, we know that no subclass can
2056 // override this member function and so we can devirtualize it. For example:
2057 //
2058 // struct A { virtual void f(); };
2059 // struct B final : A { };
2060 //
2061 // void f(B *b) {
2062 // b->f();
2063 // }
2064 //
2065 const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
2066 if (MostDerivedClassDecl->hasAttr<FinalAttr>())
2067 return true;
2068
2069 // If the member function is marked 'final', we know that it can't be
2070 // overridden and can therefore devirtualize it.
2071 if (MD->hasAttr<FinalAttr>())
2072 return true;
2073
2074 // Similarly, if the member function's class is itself marked 'final', no
2075 // class can derive from it, so the function can't be overridden and we can
2076 // therefore devirtualize the member function call.
2077 return true;
2078
2079 Base = skipNoOpCastsAndParens(Base);
2080 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
2081 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
2082 // If the variable has record type, it denotes a complete object whose
// dynamic type is its static type, so the call can be devirtualized.
2083 return VD->getType()->isRecordType();
2084 }
2085
2086 return false;
2087 }
2088
2089 // We can always devirtualize calls on temporary object expressions.
2090 if (isa<CXXConstructExpr>(Base))
2091 return true;
2092
2093 // And calls on bound temporaries.
2094 if (isa<CXXBindTemporaryExpr>(Base))
2095 return true;
2096
2097 // Check if this is a call expr that returns a record type.
2098 if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
2099 return CE->getCallReturnType()->isRecordType();
2100
2101 // We can't devirtualize the call.
2102 return false;
2103 }
2104
2105 static bool UseVirtualCall(ASTContext &Context,
2106 const CXXOperatorCallExpr *CE,
2107 const CXXMethodDecl *MD) {
2108 if (!MD->isVirtual())
2109 return false;
2110
2111 // When building with -fapple-kext, all calls must go through the vtable since
2112 // the kernel linker can do runtime patching of vtables.
2113 if (Context.getLangOpts().AppleKext)
2114 return true;
2115
2116 return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
2117 }
2118
2119 llvm::Value *
2120 CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2121 const CXXMethodDecl *MD,
2122 llvm::Value *This) {
2123 llvm::FunctionType *fnType =
2124 CGM.getTypes().GetFunctionType(
2125 CGM.getTypes().arrangeCXXMethodDeclaration(MD));
2126
2127 if (UseVirtualCall(getContext(), E, MD))
2128 return BuildVirtualCall(MD, This, fnType);
2129
2130 return CGM.GetAddrOfFunction(MD, fnType);
2131 }
2132
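/// EmitForwardingCallToLambda - Call the given lambda's operator() with the
/// already-built argument list, reusing the enclosing function's return slot
/// when the result is returned indirectly.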
2133 void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
2134 CallArgList &callArgs) {
2135 // Look up the call operator.
2136 DeclarationName operatorName
2137 = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
2138 CXXMethodDecl *callOperator =
2139 cast<CXXMethodDecl>(lambda->lookup(operatorName).front());
2140
2141 // Get the address of the call operator.
2142 const CGFunctionInfo &calleeFnInfo =
2143 CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
2144 llvm::Value *callee =
2145 CGM.GetAddrOfFunction(GlobalDecl(callOperator),
2146 CGM.getTypes().GetFunctionType(calleeFnInfo));
2147
2148 // Prepare the return slot.
2149 const FunctionProtoType *FPT =
2150 callOperator->getType()->castAs<FunctionProtoType>();
2151 QualType resultType = FPT->getResultType();
2152 ReturnValueSlot returnSlot;
2153 if (!resultType->isVoidType() &&
2154 calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
2155 !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
2156 returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
2157
2158 // We don't need to separately arrange the call arguments because
2159 // the call can't be variadic anyway --- it's impossible to forward
2160 // variadic arguments.
2161
2162 // Now emit our call.
2163 RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
2164 callArgs, callOperator);
2165
2166 // If necessary, copy the returned value into the slot.
2167 if (!resultType->isVoidType() && returnSlot.isNull())
2168 EmitReturnOfRValue(RV, resultType);
2169 else
2170 EmitBranchThroughCleanup(ReturnBlock);
2171 }
2172
2173 void CodeGenFunction::EmitLambdaBlockInvokeBody() {
2174 const BlockDecl *BD = BlockInfo->getBlockDecl();
2175 const VarDecl *variable = BD->capture_begin()->getVariable();
2176 const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
2177
2178 // Start building arguments for forwarding call
2179 CallArgList CallArgs;
2180
2181 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
2182 llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
2183 CallArgs.add(RValue::get(ThisPtr), ThisType);
2184
2185 // Add the rest of the parameters.
2186 for (BlockDecl::param_const_iterator I = BD->param_begin(),
2187 E = BD->param_end(); I != E; ++I) {
2188 ParmVarDecl *param = *I;
2189 EmitDelegateCallArg(CallArgs, param);
2190 }
2191
2192 EmitForwardingCallToLambda(Lambda, CallArgs);
2193 }
2194
2195 void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
2196 if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
2197 // FIXME: Making this work correctly is nasty because it requires either
2198 // cloning the body of the call operator or making the call operator forward.
2199 CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
2200 return;
2201 }
2202
2203 EmitFunctionBody(Args);
2204 }
2205
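/// EmitLambdaDelegatingInvokeBody - Emit the body of a lambda's static
/// invoker by forwarding its parameters to the lambda's call operator. The
/// 'this' argument is undef: the conversion to function pointer exists only
/// for capture-less lambdas, so the lambda object is never inspected.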
2206 void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
2207 const CXXRecordDecl *Lambda = MD->getParent();
2208
2209 // Start building arguments for forwarding call
2210 CallArgList CallArgs;
2211
2212 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
2213 llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
2214 CallArgs.add(RValue::get(ThisPtr), ThisType);
2215
2216 // Add the rest of the parameters.
2217 for (FunctionDecl::param_const_iterator I = MD->param_begin(),
2218 E = MD->param_end(); I != E; ++I) {
2219 ParmVarDecl *param = *I;
2220 EmitDelegateCallArg(CallArgs, param);
2221 }
2222
2223 EmitForwardingCallToLambda(Lambda, CallArgs);
2224 }
2225
2226 void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
2227 if (MD->isVariadic()) {
2228 // FIXME: Making this work correctly is nasty because it requires either
2229 // cloning the body of the call operator or making the call operator forward.
2230 CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
2231 return;
2232 }
2233
2234 EmitLambdaDelegatingInvokeBody(MD);
2235 }
2236