1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CodeGenFunction.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "clang/Frontend/CodeGenOptions.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/CallSite.h"
22
23 using namespace clang;
24 using namespace CodeGen;
25
26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
27 SourceLocation CallLoc,
28 llvm::Value *Callee,
29 ReturnValueSlot ReturnValue,
30 llvm::Value *This,
31 llvm::Value *ImplicitParam,
32 QualType ImplicitParamTy,
33 CallExpr::const_arg_iterator ArgBeg,
34 CallExpr::const_arg_iterator ArgEnd) {
35 assert(MD->isInstance() &&
36 "Trying to emit a member call expr on a static method!");
37
38 // C++11 [class.mfct.non-static]p2:
39 // If a non-static member function of a class X is called for an object that
40 // is not of type X, or of a type derived from X, the behavior is undefined.
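  //
  // For illustration (not part of the original comment), a call such as the
  // following has undefined behavior that the emitted type check can help
  // diagnose under the undefined-behavior sanitizer:
  //
  //   X *p = reinterpret_cast<X *>(&some_unrelated_object);
  //   p->member_fn();   // the object is not an X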
41 EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
42 : TCK_MemberCall,
43 CallLoc, This, getContext().getRecordType(MD->getParent()));
44
45 CallArgList Args;
46
47 // Push the this ptr.
48 Args.add(RValue::get(This), MD->getThisType(getContext()));
49
50 // If there is an implicit parameter (e.g. VTT), emit it.
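  // (In the Itanium C++ ABI, for example, the VTT - an array of vtable
  //  pointers - is passed implicitly to base-subobject constructors and
  //  destructors of classes with virtual bases.)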
51 if (ImplicitParam) {
52 Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53 }
54
55 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56 RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
57
58 // And the rest of the call args.
59 EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
60
61 return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
62 Callee, ReturnValue, Args, MD);
63 }
64
65 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
66 // quite what we want.
67 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
68 while (true) {
69 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
70 E = PE->getSubExpr();
71 continue;
72 }
73
74 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
75 if (CE->getCastKind() == CK_NoOp) {
76 E = CE->getSubExpr();
77 continue;
78 }
79 }
80 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
81 if (UO->getOpcode() == UO_Extension) {
82 E = UO->getSubExpr();
83 continue;
84 }
85 }
86 return E;
87 }
88 }
89
90 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
91 /// expr can be devirtualized.
92 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
93 const Expr *Base,
94 const CXXMethodDecl *MD) {
95
96 // When building with -fapple-kext, all calls must go through the vtable since
97 // the kernel linker can do runtime patching of vtables.
98 if (Context.getLangOpts().AppleKext)
99 return false;
100
101 // If the most derived class is marked final, we know that no subclass can
102 // override this member function and so we can devirtualize it. For example:
103 //
104 // struct A { virtual void f(); }
105 // struct B final : A { };
106 //
107 // void f(B *b) {
108 // b->f();
109 // }
110 //
111 const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
112 if (MostDerivedClassDecl->hasAttr<FinalAttr>())
113 return true;
114
115 // If the member function is marked 'final', we know that it can't be
116 // overridden and can therefore devirtualize it.
117 if (MD->hasAttr<FinalAttr>())
118 return true;
119
120 // Similarly, if the class itself is marked 'final' it can't be overridden
121 // and we can therefore devirtualize the member function call.
122 if (MD->getParent()->hasAttr<FinalAttr>())
123 return true;
124
125 Base = skipNoOpCastsAndParens(Base);
126 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
127 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
128 // This is a record decl. We know the type and can devirtualize it.
129 return VD->getType()->isRecordType();
130 }
131
132 return false;
133 }
134
135 // We can devirtualize calls on an object accessed by a class member access
136 // expression, since by C++11 [basic.life]p6 we know that it can't refer to
137 // a derived class object constructed in the same location.
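  //
  // For example (reusing the A/B hierarchy above), a call through a member
  // subobject can be devirtualized because its dynamic type is known:
  //
  //   struct C { A a; };
  //   void g(C &c) { c.a.f(); }  // direct call to A::f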
138 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
139 if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
140 return VD->getType()->isRecordType();
141
142 // We can always devirtualize calls on temporary object expressions.
143 if (isa<CXXConstructExpr>(Base))
144 return true;
145
146 // And calls on bound temporaries.
147 if (isa<CXXBindTemporaryExpr>(Base))
148 return true;
149
150 // Check if this is a call expr that returns a record type.
151 if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
152 return CE->getCallReturnType()->isRecordType();
153
154 // We can't devirtualize the call.
155 return false;
156 }
157
158 static CXXRecordDecl *getCXXRecord(const Expr *E) {
159 QualType T = E->getType();
160 if (const PointerType *PTy = T->getAs<PointerType>())
161 T = PTy->getPointeeType();
162 const RecordType *Ty = T->castAs<RecordType>();
163 return cast<CXXRecordDecl>(Ty->getDecl());
164 }
165
166 // Note: This function also emits constructor calls to support an MSVC
167 // extension allowing explicit constructor function calls.
168 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
169 ReturnValueSlot ReturnValue) {
170 const Expr *callee = CE->getCallee()->IgnoreParens();
171
172 if (isa<BinaryOperator>(callee))
173 return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
174
175 const MemberExpr *ME = cast<MemberExpr>(callee);
176 const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
177
178 CGDebugInfo *DI = getDebugInfo();
179 if (DI &&
180 CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo &&
181 !isa<CallExpr>(ME->getBase())) {
182 QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
183 if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
184 DI->getOrCreateRecordType(PTy->getPointeeType(),
185 MD->getParent()->getLocation());
186 }
187 }
188
189 if (MD->isStatic()) {
190 // The method is static, emit it as we would a regular call.
191 llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
192 return EmitCall(getContext().getPointerType(MD->getType()), Callee,
193 ReturnValue, CE->arg_begin(), CE->arg_end());
194 }
195
196 // Compute the object pointer.
197 const Expr *Base = ME->getBase();
198 bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
199
200 const CXXMethodDecl *DevirtualizedMethod = NULL;
201 if (CanUseVirtualCall &&
202 canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
203 const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
204 DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
205 assert(DevirtualizedMethod);
206 const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
207 const Expr *Inner = Base->ignoreParenBaseCasts();
208 if (getCXXRecord(Inner) == DevirtualizedClass)
209 // If the class of the Inner expression is where the dynamic method
210 // is defined, build the this pointer from it.
211 Base = Inner;
212 else if (getCXXRecord(Base) != DevirtualizedClass) {
213 // If the method is defined in a class that is not the best dynamic
214 // one or the one of the full expression, we would have to build
215 // a derived-to-base cast to compute the correct this pointer, but
216 // we don't have support for that yet, so do a virtual call.
217 DevirtualizedMethod = NULL;
218 }
219 // If the return types are not the same, this might be a case where more
220 // code needs to run to compensate for it. For example, the derived
221 // method might return a type that inherits from the return
222 // type of MD and has a prefix.
223 // For now we just avoid devirtualizing these covariant cases.
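    // Illustrative covariant case (using A/B from above) that is therefore
    // not devirtualized:
    //
    //   struct X { virtual A *clone(); };
    //   struct Y final : X { virtual B *clone(); };  // covariant return type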
224 if (DevirtualizedMethod &&
225 DevirtualizedMethod->getResultType().getCanonicalType() !=
226 MD->getResultType().getCanonicalType())
227 DevirtualizedMethod = NULL;
228 }
229
230 llvm::Value *This;
231 if (ME->isArrow())
232 This = EmitScalarExpr(Base);
233 else
234 This = EmitLValue(Base).getAddress();
235
236
237 if (MD->isTrivial()) {
238 if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
239 if (isa<CXXConstructorDecl>(MD) &&
240 cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
241 return RValue::get(0);
242
243 if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
244 // We don't like to generate the trivial copy/move assignment operator
245 // when it isn't necessary; just produce the proper effect here.
246 llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
247 EmitAggregateAssign(This, RHS, CE->getType());
248 return RValue::get(This);
249 }
250
251 if (isa<CXXConstructorDecl>(MD) &&
252 cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
253 // Trivial move and copy ctor are the same.
254 llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
255 EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
256 CE->arg_begin(), CE->arg_end());
257 return RValue::get(This);
258 }
259 llvm_unreachable("unknown trivial member function");
260 }
261
262 // Compute the function type we're calling.
263 const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
264 const CGFunctionInfo *FInfo = 0;
265 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
266 FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
267 Dtor_Complete);
268 else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
269 FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
270 Ctor_Complete);
271 else
272 FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
273
274 llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
275
276 // C++ [class.virtual]p12:
277 // Explicit qualification with the scope operator (5.1) suppresses the
278 // virtual call mechanism.
279 //
280 // We also don't emit a virtual call if the base expression has a record type
281 // because then we know what the type is.
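  //
  // For example, the qualified call below is emitted as a direct call even
  // though f() is virtual:
  //
  //   struct Derived : A { void f(); };
  //   void g(Derived *d) { d->A::f(); }  // calls A::f, no vtable lookup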
282 bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
283
284 llvm::Value *Callee;
285 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
286 if (UseVirtualCall) {
287 assert(CE->arg_begin() == CE->arg_end() &&
288 "Virtual destructor shouldn't have explicit parameters");
289 return CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor,
290 Dtor_Complete,
291 CE->getExprLoc(),
292 ReturnValue, This);
293 } else {
294 if (getLangOpts().AppleKext &&
295 MD->isVirtual() &&
296 ME->hasQualifier())
297 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
298 else if (!DevirtualizedMethod)
299 Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
300 else {
301 const CXXDestructorDecl *DDtor =
302 cast<CXXDestructorDecl>(DevirtualizedMethod);
303 Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
304 }
305 }
306 } else if (const CXXConstructorDecl *Ctor =
307 dyn_cast<CXXConstructorDecl>(MD)) {
308 Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
309 } else if (UseVirtualCall) {
310 Callee = BuildVirtualCall(MD, This, Ty);
311 } else {
312 if (getLangOpts().AppleKext &&
313 MD->isVirtual() &&
314 ME->hasQualifier())
315 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
316 else if (!DevirtualizedMethod)
317 Callee = CGM.GetAddrOfFunction(MD, Ty);
318 else {
319 Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
320 }
321 }
322
323 return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
324 /*ImplicitParam=*/0, QualType(),
325 CE->arg_begin(), CE->arg_end());
326 }
327
328 RValue
329 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
330 ReturnValueSlot ReturnValue) {
331 const BinaryOperator *BO =
332 cast<BinaryOperator>(E->getCallee()->IgnoreParens());
333 const Expr *BaseExpr = BO->getLHS();
334 const Expr *MemFnExpr = BO->getRHS();
335
336 const MemberPointerType *MPT =
337 MemFnExpr->getType()->castAs<MemberPointerType>();
338
339 const FunctionProtoType *FPT =
340 MPT->getPointeeType()->castAs<FunctionProtoType>();
341 const CXXRecordDecl *RD =
342 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
343
344 // Get the member function pointer.
345 llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
346
347 // Emit the 'this' pointer.
348 llvm::Value *This;
349
350 if (BO->getOpcode() == BO_PtrMemI)
351 This = EmitScalarExpr(BaseExpr);
352 else
353 This = EmitLValue(BaseExpr).getAddress();
354
355 EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
356 QualType(MPT->getClass(), 0));
357
358 // Ask the ABI to load the callee. Note that This is modified.
359 llvm::Value *Callee =
360 CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
361
362 CallArgList Args;
363
364 QualType ThisType =
365 getContext().getPointerType(getContext().getTagDeclType(RD));
366
367 // Push the this ptr.
368 Args.add(RValue::get(This), ThisType);
369
370 RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
371
372 // And the rest of the call args
373 EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
374 return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
375 ReturnValue, Args);
376 }
377
378 RValue
379 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
380 const CXXMethodDecl *MD,
381 ReturnValueSlot ReturnValue) {
382 assert(MD->isInstance() &&
383 "Trying to emit a member call expr on a static method!");
384 LValue LV = EmitLValue(E->getArg(0));
385 llvm::Value *This = LV.getAddress();
386
387 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
388 MD->isTrivial()) {
389 llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
390 QualType Ty = E->getType();
391 EmitAggregateAssign(This, Src, Ty);
392 return RValue::get(This);
393 }
394
395 llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
396 return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
397 /*ImplicitParam=*/0, QualType(),
398 E->arg_begin() + 1, E->arg_end());
399 }
400
401 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
402 ReturnValueSlot ReturnValue) {
403 return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
404 }
405
406 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
407 llvm::Value *DestPtr,
408 const CXXRecordDecl *Base) {
409 if (Base->isEmpty())
410 return;
411
412 DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
413
414 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
415 CharUnits Size = Layout.getNonVirtualSize();
416 CharUnits Align = Layout.getNonVirtualAlign();
417
418 llvm::Value *SizeVal = CGF.CGM.getSize(Size);
419
420 // If the type contains a pointer to data member we can't memset it to zero.
421 // Instead, create a null constant and copy it to the destination.
422 // TODO: there are other patterns besides zero that we can usefully memset,
423 // like -1, which happens to be the pattern used by member-pointers.
424 // TODO: isZeroInitializable can be over-conservative in the case where a
425 // virtual base contains a member pointer.
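  //
  // For example, on targets whose ABI represents a null pointer to data
  // member as -1 rather than 0 (e.g. the Itanium C++ ABI), a base such as
  //
  //   struct S { int S::*field; };
  //
  // cannot simply be memset to zero.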
426 if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
427 llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
428
429 llvm::GlobalVariable *NullVariable =
430 new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
431 /*isConstant=*/true,
432 llvm::GlobalVariable::PrivateLinkage,
433 NullConstant, Twine());
434 NullVariable->setAlignment(Align.getQuantity());
435 llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
436
437 // Get and call the appropriate llvm.memcpy overload.
438 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
439 return;
440 }
441
442 // Otherwise, just memset the whole thing to zero. This is legal
443 // because in LLVM, all default initializers (other than the ones we just
444 // handled above) are guaranteed to have a bit pattern of all zeros.
445 CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
446 Align.getQuantity());
447 }
448
449 void
450 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
451 AggValueSlot Dest) {
452 assert(!Dest.isIgnored() && "Must have a destination!");
453 const CXXConstructorDecl *CD = E->getConstructor();
454
455 // If we require zero initialization before (or instead of) calling the
456 // constructor, as can be the case with a non-user-provided default
457 // constructor, emit the zero initialization now, unless destination is
458 // already zeroed.
459 if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
460 switch (E->getConstructionKind()) {
461 case CXXConstructExpr::CK_Delegating:
462 case CXXConstructExpr::CK_Complete:
463 EmitNullInitialization(Dest.getAddr(), E->getType());
464 break;
465 case CXXConstructExpr::CK_VirtualBase:
466 case CXXConstructExpr::CK_NonVirtualBase:
467 EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
468 break;
469 }
470 }
471
472 // If this is a call to a trivial default constructor, do nothing.
473 if (CD->isTrivial() && CD->isDefaultConstructor())
474 return;
475
476 // Elide the constructor if we're constructing from a temporary.
477 // The temporary check is required because Sema sets this on NRVO
478 // returns.
479 if (getLangOpts().ElideConstructors && E->isElidable()) {
480 assert(getContext().hasSameUnqualifiedType(E->getType(),
481 E->getArg(0)->getType()));
482 if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
483 EmitAggExpr(E->getArg(0), Dest);
484 return;
485 }
486 }
487
488 if (const ConstantArrayType *arrayType
489 = getContext().getAsConstantArrayType(E->getType())) {
490 EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
491 E->arg_begin(), E->arg_end());
492 } else {
493 CXXCtorType Type = Ctor_Complete;
494 bool ForVirtualBase = false;
495 bool Delegating = false;
496
497 switch (E->getConstructionKind()) {
498 case CXXConstructExpr::CK_Delegating:
499 // We should be emitting a constructor; GlobalDecl will assert this
500 Type = CurGD.getCtorType();
501 Delegating = true;
502 break;
503
504 case CXXConstructExpr::CK_Complete:
505 Type = Ctor_Complete;
506 break;
507
508 case CXXConstructExpr::CK_VirtualBase:
509 ForVirtualBase = true;
510 // fall-through
511
512 case CXXConstructExpr::CK_NonVirtualBase:
513 Type = Ctor_Base;
514 }
515
516 // Call the constructor.
517 EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
518 E->arg_begin(), E->arg_end());
519 }
520 }
521
522 void
523 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
524 llvm::Value *Src,
525 const Expr *Exp) {
526 if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
527 Exp = E->getSubExpr();
528 assert(isa<CXXConstructExpr>(Exp) &&
529 "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
530 const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
531 const CXXConstructorDecl *CD = E->getConstructor();
532 RunCleanupsScope Scope(*this);
533
534 // If we require zero initialization before (or instead of) calling the
535 // constructor, as can be the case with a non-user-provided default
536 // constructor, emit the zero initialization now.
537 // FIXME. Do I still need this for a copy ctor synthesis?
538 if (E->requiresZeroInitialization())
539 EmitNullInitialization(Dest, E->getType());
540
541 assert(!getContext().getAsConstantArrayType(E->getType())
542 && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
543 EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
544 E->arg_begin(), E->arg_end());
545 }
546
547 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
548 const CXXNewExpr *E) {
549 if (!E->isArray())
550 return CharUnits::Zero();
551
552 // No cookie is required if the operator new[] being used is the
553 // reserved placement operator new[].
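  //
  // For example, 'new (buf) T[n]' uses the reserved placement form
  // 'operator new[](std::size_t, void *)'; the caller's buffer is not
  // assumed to include space for a cookie.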
554 if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
555 return CharUnits::Zero();
556
557 return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
558 }
559
560 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
561 const CXXNewExpr *e,
562 unsigned minElements,
563 llvm::Value *&numElements,
564 llvm::Value *&sizeWithoutCookie) {
565 QualType type = e->getAllocatedType();
566
567 if (!e->isArray()) {
568 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
569 sizeWithoutCookie
570 = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
571 return sizeWithoutCookie;
572 }
573
574 // The width of size_t.
575 unsigned sizeWidth = CGF.SizeTy->getBitWidth();
576
577 // Figure out the cookie size.
578 llvm::APInt cookieSize(sizeWidth,
579 CalculateCookiePadding(CGF, e).getQuantity());
580
581 // Emit the array size expression.
582 // We multiply the size of all dimensions for NumElements.
583 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
584 numElements = CGF.EmitScalarExpr(e->getArraySize());
585 assert(isa<llvm::IntegerType>(numElements->getType()));
586
587 // The number of elements can have an arbitrary integer type;
588 // essentially, we need to multiply it by a constant factor, add a
589 // cookie size, and verify that the result is representable as a
590 // size_t. That's just a gloss, though, and it's wrong in one
591 // important way: if the count is negative, it's an error even if
592 // the cookie size would bring the total size >= 0.
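  //
  // Worked example (assuming a 64-bit size_t): for 'new int[n]' with a
  // signed 32-bit n, n is sign-extended to size_t, negative values are
  // rejected, the result is multiplied by sizeof(int) with an overflow
  // check, and any cookie is added; on overflow the size becomes -1 so
  // the allocation function will fail.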
593 bool isSigned
594 = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
595 llvm::IntegerType *numElementsType
596 = cast<llvm::IntegerType>(numElements->getType());
597 unsigned numElementsWidth = numElementsType->getBitWidth();
598
599 // Compute the constant factor.
600 llvm::APInt arraySizeMultiplier(sizeWidth, 1);
601 while (const ConstantArrayType *CAT
602 = CGF.getContext().getAsConstantArrayType(type)) {
603 type = CAT->getElementType();
604 arraySizeMultiplier *= CAT->getSize();
605 }
606
607 CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
608 llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
609 typeSizeMultiplier *= arraySizeMultiplier;
610
611 // This will be a size_t.
612 llvm::Value *size;
613
614 // If someone is doing 'new int[42]' there is no need to do a dynamic check.
615 // Don't bloat the -O0 code.
616 if (llvm::ConstantInt *numElementsC =
617 dyn_cast<llvm::ConstantInt>(numElements)) {
618 const llvm::APInt &count = numElementsC->getValue();
619
620 bool hasAnyOverflow = false;
621
622 // If 'count' was a negative number, it's an overflow.
623 if (isSigned && count.isNegative())
624 hasAnyOverflow = true;
625
626 // We want to do all this arithmetic in size_t. If numElements is
627 // wider than that, check whether it's already too big, and if so,
628 // overflow.
629 else if (numElementsWidth > sizeWidth &&
630 numElementsWidth - sizeWidth > count.countLeadingZeros())
631 hasAnyOverflow = true;
632
633 // Okay, compute a count at the right width.
634 llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
635
636 // If there is a brace-initializer, we cannot allocate fewer elements than
637 // there are initializers. If we do, that's treated like an overflow.
638 if (adjustedCount.ult(minElements))
639 hasAnyOverflow = true;
640
641 // Scale numElements by that. This might overflow, but we don't
642 // care because it only overflows if allocationSize does, too, and
643 // if that overflows then we shouldn't use this.
644 numElements = llvm::ConstantInt::get(CGF.SizeTy,
645 adjustedCount * arraySizeMultiplier);
646
647 // Compute the size before cookie, and track whether it overflowed.
648 bool overflow;
649 llvm::APInt allocationSize
650 = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
651 hasAnyOverflow |= overflow;
652
653 // Add in the cookie, and check whether it's overflowed.
654 if (cookieSize != 0) {
655 // Save the current size without a cookie. This shouldn't be
656 // used if there was overflow.
657 sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
658
659 allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
660 hasAnyOverflow |= overflow;
661 }
662
663 // On overflow, produce a -1 so operator new will fail.
664 if (hasAnyOverflow) {
665 size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
666 } else {
667 size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
668 }
669
670 // Otherwise, we might need to use the overflow intrinsics.
671 } else {
672 // There are up to five conditions we need to test for:
673 // 1) if isSigned, we need to check whether numElements is negative;
674 // 2) if numElementsWidth > sizeWidth, we need to check whether
675 // numElements is larger than something representable in size_t;
676 // 3) if minElements > 0, we need to check whether numElements is smaller
677 // than that.
678 // 4) we need to compute
679 // sizeWithoutCookie := numElements * typeSizeMultiplier
680 // and check whether it overflows; and
681 // 5) if we need a cookie, we need to compute
682 // size := sizeWithoutCookie + cookieSize
683 // and check whether it overflows.
684
685 llvm::Value *hasOverflow = 0;
686
687 // If numElementsWidth > sizeWidth, then one way or another, we're
688 // going to have to do a comparison for (2), and this happens to
689 // take care of (1), too.
690 if (numElementsWidth > sizeWidth) {
691 llvm::APInt threshold(numElementsWidth, 1);
692 threshold <<= sizeWidth;
693
694 llvm::Value *thresholdV
695 = llvm::ConstantInt::get(numElementsType, threshold);
696
697 hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
698 numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
699
700 // Otherwise, if we're signed, we want to sext up to size_t.
701 } else if (isSigned) {
702 if (numElementsWidth < sizeWidth)
703 numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
704
705 // If there's a non-1 type size multiplier, then we can do the
706 // signedness check at the same time as we do the multiply
707 // because a negative number times anything will cause an
708 // unsigned overflow. Otherwise, we have to do it here. But at least
709 // in this case, we can subsume the >= minElements check.
710 if (typeSizeMultiplier == 1)
711 hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
712 llvm::ConstantInt::get(CGF.SizeTy, minElements));
713
714 // Otherwise, zext up to size_t if necessary.
715 } else if (numElementsWidth < sizeWidth) {
716 numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
717 }
718
719 assert(numElements->getType() == CGF.SizeTy);
720
721 if (minElements) {
722 // Don't allow allocation of fewer elements than we have initializers.
723 if (!hasOverflow) {
724 hasOverflow = CGF.Builder.CreateICmpULT(numElements,
725 llvm::ConstantInt::get(CGF.SizeTy, minElements));
726 } else if (numElementsWidth > sizeWidth) {
727 // The other existing overflow subsumes this check.
728 // We do an unsigned comparison, since any signed value < -1 is
729 // taken care of either above or below.
730 hasOverflow = CGF.Builder.CreateOr(hasOverflow,
731 CGF.Builder.CreateICmpULT(numElements,
732 llvm::ConstantInt::get(CGF.SizeTy, minElements)));
733 }
734 }
735
736 size = numElements;
737
738 // Multiply by the type size if necessary. This multiplier
739 // includes all the factors for nested arrays.
740 //
741 // This step also causes numElements to be scaled up by the
742 // nested-array factor if necessary. Overflow on this computation
743 // can be ignored because the result shouldn't be used if
744 // allocation fails.
745 if (typeSizeMultiplier != 1) {
746 llvm::Value *umul_with_overflow
747 = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
748
749 llvm::Value *tsmV =
750 llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
751 llvm::Value *result =
752 CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
753
754 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
755 if (hasOverflow)
756 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
757 else
758 hasOverflow = overflowed;
759
760 size = CGF.Builder.CreateExtractValue(result, 0);
761
762 // Also scale up numElements by the array size multiplier.
763 if (arraySizeMultiplier != 1) {
764 // If the base element type size is 1, then we can re-use the
765 // multiply we just did.
766 if (typeSize.isOne()) {
767 assert(arraySizeMultiplier == typeSizeMultiplier);
768 numElements = size;
769
770 // Otherwise we need a separate multiply.
771 } else {
772 llvm::Value *asmV =
773 llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
774 numElements = CGF.Builder.CreateMul(numElements, asmV);
775 }
776 }
777 } else {
778 // numElements doesn't need to be scaled.
779 assert(arraySizeMultiplier == 1);
780 }
781
782 // Add in the cookie size if necessary.
783 if (cookieSize != 0) {
784 sizeWithoutCookie = size;
785
786 llvm::Value *uadd_with_overflow
787 = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
788
789 llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
790 llvm::Value *result =
791 CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
792
793 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
794 if (hasOverflow)
795 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
796 else
797 hasOverflow = overflowed;
798
799 size = CGF.Builder.CreateExtractValue(result, 0);
800 }
801
802 // If we had any possibility of dynamic overflow, make a select to
803 // overwrite 'size' with an all-ones value, which should cause
804 // operator new to throw.
805 if (hasOverflow)
806 size = CGF.Builder.CreateSelect(hasOverflow,
807 llvm::Constant::getAllOnesValue(CGF.SizeTy),
808 size);
809 }
810
811 if (cookieSize == 0)
812 sizeWithoutCookie = size;
813 else
814 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
815
816 return size;
817 }
818
819 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
820 QualType AllocType, llvm::Value *NewPtr) {
821
822 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
823 switch (CGF.getEvaluationKind(AllocType)) {
824 case TEK_Scalar:
825 CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
826 Alignment),
827 false);
828 return;
829 case TEK_Complex:
830 CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
831 Alignment),
832 /*isInit*/ true);
833 return;
834 case TEK_Aggregate: {
835 AggValueSlot Slot
836 = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
837 AggValueSlot::IsDestructed,
838 AggValueSlot::DoesNotNeedGCBarriers,
839 AggValueSlot::IsNotAliased);
840 CGF.EmitAggExpr(Init, Slot);
841
842 CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
843 return;
844 }
845 }
846 llvm_unreachable("bad evaluation kind");
847 }
848
849 void
850 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
851 QualType elementType,
852 llvm::Value *beginPtr,
853 llvm::Value *numElements) {
854 if (!E->hasInitializer())
855 return; // We have a POD type.
856
857 llvm::Value *explicitPtr = beginPtr;
858 // Find the end of the array, hoisted out of the loop.
859 llvm::Value *endPtr =
860 Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
861
862 unsigned initializerElements = 0;
863
864 const Expr *Init = E->getInitializer();
865 llvm::AllocaInst *endOfInit = 0;
866 QualType::DestructionKind dtorKind = elementType.isDestructedType();
867 EHScopeStack::stable_iterator cleanup;
868 llvm::Instruction *cleanupDominator = 0;
869 // If the initializer is an initializer list, first do the explicit elements.
870 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
871 initializerElements = ILE->getNumInits();
872
873 // Enter a partial-destruction cleanup if necessary.
874 if (needsEHCleanup(dtorKind)) {
875 // In principle we could tell the cleanup where we are more
876 // directly, but the control flow can get so varied here that it
877 // would actually be quite complex. Therefore we go through an
878 // alloca.
879 endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
880 cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
881 pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
882 getDestroyer(dtorKind));
883 cleanup = EHStack.stable_begin();
884 }
885
886 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
887 // Tell the cleanup that it needs to destroy up to this
888 // element. TODO: some of these stores can be trivially
889 // observed to be unnecessary.
890 if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
891 StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
892 explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
893 }
894
895 // The remaining elements are filled with the array filler expression.
896 Init = ILE->getArrayFiller();
897 }
898
899 // Create the continuation block.
900 llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
901
902 // If the number of elements isn't constant, we have to now check if there is
903 // anything left to initialize.
904 if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
905 // If all elements have already been initialized, skip the whole loop.
906 if (constNum->getZExtValue() <= initializerElements) {
907 // If there was a cleanup, deactivate it.
908 if (cleanupDominator)
909 DeactivateCleanupBlock(cleanup, cleanupDominator);
910 return;
911 }
912 } else {
913 llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
914 llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
915 "array.isempty");
916 Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
917 EmitBlock(nonEmptyBB);
918 }
919
920 // Enter the loop.
921 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
922 llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
923
924 EmitBlock(loopBB);
925
926 // Set up the current-element phi.
927 llvm::PHINode *curPtr =
928 Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
929 curPtr->addIncoming(explicitPtr, entryBB);
930
931 // Store the new cleanup position for irregular cleanups.
932 if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
933
934 // Enter a partial-destruction cleanup if necessary.
935 if (!cleanupDominator && needsEHCleanup(dtorKind)) {
936 pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
937 getDestroyer(dtorKind));
938 cleanup = EHStack.stable_begin();
939 cleanupDominator = Builder.CreateUnreachable();
940 }
941
942 // Emit the initializer into this element.
943 StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
944
945 // Leave the cleanup if we entered one.
946 if (cleanupDominator) {
947 DeactivateCleanupBlock(cleanup, cleanupDominator);
948 cleanupDominator->eraseFromParent();
949 }
950
951 // Advance to the next element.
952 llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
953
954 // Check whether we've gotten to the end of the array and, if so,
955 // exit the loop.
956 llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
957 Builder.CreateCondBr(isEnd, contBB, loopBB);
958 curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
959
960 EmitBlock(contBB);
961 }
962
963 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
964 llvm::Value *NewPtr, llvm::Value *Size) {
965 CGF.EmitCastToVoidPtr(NewPtr);
966 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
967 CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
968 Alignment.getQuantity(), false);
969 }
970
971 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
972 QualType ElementType,
973 llvm::Value *NewPtr,
974 llvm::Value *NumElements,
975 llvm::Value *AllocSizeWithoutCookie) {
976 const Expr *Init = E->getInitializer();
977 if (E->isArray()) {
978 if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
979 CXXConstructorDecl *Ctor = CCE->getConstructor();
980 if (Ctor->isTrivial()) {
981 // If new expression did not specify value-initialization, then there
982 // is no initialization.
983 if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
984 return;
985
986 if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
987 // Optimization: since zero initialization will just set the memory
988 // to all zeroes, generate a single memset to do it in one shot.
989 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
990 return;
991 }
992 }
993
994 CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
995 CCE->arg_begin(), CCE->arg_end(),
996 CCE->requiresZeroInitialization());
997 return;
998 } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
999 CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
1000 // Optimization: since zero initialization will just set the memory
1001 // to all zeroes, generate a single memset to do it in one shot.
1002 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
1003 return;
1004 }
1005 CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
1006 return;
1007 }
1008
1009 if (!Init)
1010 return;
1011
1012 StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
1013 }
1014
1015 namespace {
1016 /// A cleanup to call the given 'operator delete' function upon
1017 /// abnormal exit from a new expression.
1018 class CallDeleteDuringNew : public EHScopeStack::Cleanup {
1019 size_t NumPlacementArgs;
1020 const FunctionDecl *OperatorDelete;
1021 llvm::Value *Ptr;
1022 llvm::Value *AllocSize;
1023
1024 RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1025
1026 public:
1027 static size_t getExtraSize(size_t NumPlacementArgs) {
1028 return NumPlacementArgs * sizeof(RValue);
1029 }
1030
1031 CallDeleteDuringNew(size_t NumPlacementArgs,
1032 const FunctionDecl *OperatorDelete,
1033 llvm::Value *Ptr,
1034 llvm::Value *AllocSize)
1035 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1036 Ptr(Ptr), AllocSize(AllocSize) {}
1037
1038 void setPlacementArg(unsigned I, RValue Arg) {
1039 assert(I < NumPlacementArgs && "index out of range");
1040 getPlacementArgs()[I] = Arg;
1041 }
1042
1043 void Emit(CodeGenFunction &CGF, Flags flags) {
1044 const FunctionProtoType *FPT
1045 = OperatorDelete->getType()->getAs<FunctionProtoType>();
1046 assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1047 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1048
1049 CallArgList DeleteArgs;
1050
1051 // The first argument is always a void*.
1052 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1053 DeleteArgs.add(RValue::get(Ptr), *AI++);
1054
1055 // A member 'operator delete' can take an extra 'size_t' argument.
1056 if (FPT->getNumArgs() == NumPlacementArgs + 2)
1057 DeleteArgs.add(RValue::get(AllocSize), *AI++);
1058
1059 // Pass the rest of the arguments, which must match exactly.
1060 for (unsigned I = 0; I != NumPlacementArgs; ++I)
1061 DeleteArgs.add(getPlacementArgs()[I], *AI++);
1062
1063 // Call 'operator delete'.
1064 CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1065 CGF.CGM.GetAddrOfFunction(OperatorDelete),
1066 ReturnValueSlot(), DeleteArgs, OperatorDelete);
1067 }
1068 };
1069
1070 /// A cleanup to call the given 'operator delete' function upon
1071 /// abnormal exit from a new expression when the new expression is
1072 /// conditional.
1073 class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
1074 size_t NumPlacementArgs;
1075 const FunctionDecl *OperatorDelete;
1076 DominatingValue<RValue>::saved_type Ptr;
1077 DominatingValue<RValue>::saved_type AllocSize;
1078
1079 DominatingValue<RValue>::saved_type *getPlacementArgs() {
1080 return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1081 }
1082
1083 public:
1084 static size_t getExtraSize(size_t NumPlacementArgs) {
1085 return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1086 }
1087
1088 CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1089 const FunctionDecl *OperatorDelete,
1090 DominatingValue<RValue>::saved_type Ptr,
1091 DominatingValue<RValue>::saved_type AllocSize)
1092 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1093 Ptr(Ptr), AllocSize(AllocSize) {}
1094
1095 void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1096 assert(I < NumPlacementArgs && "index out of range");
1097 getPlacementArgs()[I] = Arg;
1098 }
1099
1100 void Emit(CodeGenFunction &CGF, Flags flags) {
1101 const FunctionProtoType *FPT
1102 = OperatorDelete->getType()->getAs<FunctionProtoType>();
1103 assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1104 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1105
1106 CallArgList DeleteArgs;
1107
1108 // The first argument is always a void*.
1109 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1110 DeleteArgs.add(Ptr.restore(CGF), *AI++);
1111
1112 // A member 'operator delete' can take an extra 'size_t' argument.
1113 if (FPT->getNumArgs() == NumPlacementArgs + 2) {
1114 RValue RV = AllocSize.restore(CGF);
1115 DeleteArgs.add(RV, *AI++);
1116 }
1117
1118 // Pass the rest of the arguments, which must match exactly.
1119 for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1120 RValue RV = getPlacementArgs()[I].restore(CGF);
1121 DeleteArgs.add(RV, *AI++);
1122 }
1123
1124 // Call 'operator delete'.
1125 CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1126 CGF.CGM.GetAddrOfFunction(OperatorDelete),
1127 ReturnValueSlot(), DeleteArgs, OperatorDelete);
1128 }
1129 };
1130 }
1131
1132 /// Enter a cleanup to call 'operator delete' if the initializer in a
1133 /// new-expression throws.
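/// For example, if T's constructor throws in 'new (arg) T()', the placement
/// 'operator delete' that matches the placement 'operator new' (if one is
/// declared) is called with the same placement arguments before the
/// exception propagates.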
1134 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1135 const CXXNewExpr *E,
1136 llvm::Value *NewPtr,
1137 llvm::Value *AllocSize,
1138 const CallArgList &NewArgs) {
1139 // If we're not inside a conditional branch, then the cleanup will
1140 // dominate and we can do the easier (and more efficient) thing.
1141 if (!CGF.isInConditionalBranch()) {
1142 CallDeleteDuringNew *Cleanup = CGF.EHStack
1143 .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1144 E->getNumPlacementArgs(),
1145 E->getOperatorDelete(),
1146 NewPtr, AllocSize);
1147 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1148 Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1149
1150 return;
1151 }
1152
1153 // Otherwise, we need to save all this stuff.
1154 DominatingValue<RValue>::saved_type SavedNewPtr =
1155 DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
1156 DominatingValue<RValue>::saved_type SavedAllocSize =
1157 DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1158
1159 CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1160 .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1161 E->getNumPlacementArgs(),
1162 E->getOperatorDelete(),
1163 SavedNewPtr,
1164 SavedAllocSize);
1165 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1166 Cleanup->setPlacementArg(I,
1167 DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1168
1169 CGF.initFullExprCleanup();
1170 }
1171
1172 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1173 // The element type being allocated.
1174 QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1175
1176 // 1. Build a call to the allocation function.
1177 FunctionDecl *allocator = E->getOperatorNew();
1178 const FunctionProtoType *allocatorType =
1179 allocator->getType()->castAs<FunctionProtoType>();
1180
1181 CallArgList allocatorArgs;
1182
1183 // The allocation size is the first argument.
1184 QualType sizeType = getContext().getSizeType();
1185
1186 // If there is a brace-initializer, we cannot allocate fewer elements than inits.
1187 unsigned minElements = 0;
1188 if (E->isArray() && E->hasInitializer()) {
1189 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1190 minElements = ILE->getNumInits();
1191 }
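  // For example, 'new int[n]{1, 2, 3}' requires n >= 3; a smaller runtime
  // value is treated as an allocation-size overflow by EmitCXXNewAllocSize.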
1192
1193 llvm::Value *numElements = 0;
1194 llvm::Value *allocSizeWithoutCookie = 0;
1195 llvm::Value *allocSize =
1196 EmitCXXNewAllocSize(*this, E, minElements, numElements,
1197 allocSizeWithoutCookie);
1198
1199 allocatorArgs.add(RValue::get(allocSize), sizeType);
1200
1201 // Emit the rest of the arguments.
1202 // FIXME: Ideally, this should just use EmitCallArgs.
1203 CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
1204
1205 // First, use the types from the function type.
1206 // We start at 1 here because the first argument (the allocation size)
1207 // has already been emitted.
1208 for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
1209 ++i, ++placementArg) {
1210 QualType argType = allocatorType->getArgType(i);
1211
1212 assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
1213 placementArg->getType()) &&
1214 "type mismatch in call argument!");
1215
1216 EmitCallArg(allocatorArgs, *placementArg, argType);
1217 }
1218
1219 // Either we've emitted all the call args, or we have a call to a
1220 // variadic function.
1221 assert((placementArg == E->placement_arg_end() ||
1222 allocatorType->isVariadic()) &&
1223 "Extra arguments to non-variadic function!");
1224
1225 // If we still have any arguments, emit them using the type of the argument.
1226 for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1227 placementArg != placementArgsEnd; ++placementArg) {
1228 EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1229 }
1230
1231 // Emit the allocation call. If the allocator is a global placement
1232 // operator, just "inline" it directly.
1233 RValue RV;
1234 if (allocator->isReservedGlobalPlacementOperator()) {
1235 assert(allocatorArgs.size() == 2);
1236 RV = allocatorArgs[1].RV;
1237 // TODO: kill any unnecessary computations done for the size
1238 // argument.
1239 } else {
1240 RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
1241 allocatorType),
1242 CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
1243 allocatorArgs, allocator);
1244 }
1245
1246 // Emit a null check on the allocation result if the allocation
1247 // function is allowed to return null (because it has a non-throwing
1248 // exception spec; for this part, we inline
1249 // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1250 // interesting initializer.
1251 bool nullCheck = allocatorType->isNothrow(getContext()) &&
1252 (!allocType.isPODType(getContext()) || E->hasInitializer());
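  //
  // For example, 'new (std::nothrow) T()' may yield a null pointer, so the
  // constructor call below must be guarded by a null check.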
1253
1254 llvm::BasicBlock *nullCheckBB = 0;
1255 llvm::BasicBlock *contBB = 0;
1256
1257 llvm::Value *allocation = RV.getScalarVal();
1258 unsigned AS = allocation->getType()->getPointerAddressSpace();
1259
1260 // The null-check means that the initializer is conditionally
1261 // evaluated.
1262 ConditionalEvaluation conditional(*this);
1263
1264 if (nullCheck) {
1265 conditional.begin(*this);
1266
1267 nullCheckBB = Builder.GetInsertBlock();
1268 llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1269 contBB = createBasicBlock("new.cont");
1270
1271 llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1272 Builder.CreateCondBr(isNull, contBB, notNullBB);
1273 EmitBlock(notNullBB);
1274 }
1275
1276 // If there's an operator delete, enter a cleanup to call it if an
1277 // exception is thrown.
1278 EHScopeStack::stable_iterator operatorDeleteCleanup;
1279 llvm::Instruction *cleanupDominator = 0;
1280 if (E->getOperatorDelete() &&
1281 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1282 EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1283 operatorDeleteCleanup = EHStack.stable_begin();
1284 cleanupDominator = Builder.CreateUnreachable();
1285 }
1286
1287 assert((allocSize == allocSizeWithoutCookie) ==
1288 CalculateCookiePadding(*this, E).isZero());
1289 if (allocSize != allocSizeWithoutCookie) {
1290 assert(E->isArray());
1291 allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1292 numElements,
1293 E, allocType);
1294 }
1295
1296 llvm::Type *elementPtrTy
1297 = ConvertTypeForMem(allocType)->getPointerTo(AS);
1298 llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1299
1300 EmitNewInitializer(*this, E, allocType, result, numElements,
1301 allocSizeWithoutCookie);
1302 if (E->isArray()) {
1303 // NewPtr is a pointer to the base element type. If we're
1304 // allocating an array of arrays, we'll need to cast back to the
1305 // array pointer type.
1306 llvm::Type *resultType = ConvertTypeForMem(E->getType());
1307 if (result->getType() != resultType)
1308 result = Builder.CreateBitCast(result, resultType);
1309 }
1310
1311 // Deactivate the 'operator delete' cleanup if we finished
1312 // initialization.
1313 if (operatorDeleteCleanup.isValid()) {
1314 DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1315 cleanupDominator->eraseFromParent();
1316 }
1317
1318 if (nullCheck) {
1319 conditional.end(*this);
1320
1321 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1322 EmitBlock(contBB);
1323
1324 llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1325 PHI->addIncoming(result, notNullBB);
1326 PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1327 nullCheckBB);
1328
1329 result = PHI;
1330 }
1331
1332 return result;
1333 }
1334
1335 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1336 llvm::Value *Ptr,
1337 QualType DeleteTy) {
1338 assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1339
1340 const FunctionProtoType *DeleteFTy =
1341 DeleteFD->getType()->getAs<FunctionProtoType>();
1342
1343 CallArgList DeleteArgs;
1344
1345 // Check if we need to pass the size to the delete operator.
1346 llvm::Value *Size = 0;
1347 QualType SizeTy;
1348 if (DeleteFTy->getNumArgs() == 2) {
1349 SizeTy = DeleteFTy->getArgType(1);
1350 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1351 Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1352 DeleteTypeSize.getQuantity());
1353 }
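  // For example, a two-parameter 'operator delete(void *, std::size_t)'
  // receives sizeof(DeleteTy), computed above, as its second argument.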
1354
1355 QualType ArgTy = DeleteFTy->getArgType(0);
1356 llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1357 DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1358
1359 if (Size)
1360 DeleteArgs.add(RValue::get(Size), SizeTy);
1361
1362 // Emit the call to delete.
1363 EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
1364 CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1365 DeleteArgs, DeleteFD);
1366 }
1367
1368 namespace {
1369 /// Calls the given 'operator delete' on a single object.
1370 struct CallObjectDelete : EHScopeStack::Cleanup {
1371 llvm::Value *Ptr;
1372 const FunctionDecl *OperatorDelete;
1373 QualType ElementType;
1374
1375 CallObjectDelete(llvm::Value *Ptr,
1376 const FunctionDecl *OperatorDelete,
1377 QualType ElementType)
1378 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1379
1380 void Emit(CodeGenFunction &CGF, Flags flags) {
1381 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1382 }
1383 };
1384 }
1385
1386 /// Emit the code for deleting a single object.
1387 static void EmitObjectDelete(CodeGenFunction &CGF,
1388 const FunctionDecl *OperatorDelete,
1389 llvm::Value *Ptr,
1390 QualType ElementType,
1391 bool UseGlobalDelete) {
1392 // Find the destructor for the type, if applicable. If the
1393 // destructor is virtual, we'll just emit the vcall and return.
1394 const CXXDestructorDecl *Dtor = 0;
1395 if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1396 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1397 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1398 Dtor = RD->getDestructor();
1399
1400 if (Dtor->isVirtual()) {
1401 if (UseGlobalDelete) {
1402 // If we're supposed to call the global delete, make sure we do so
1403 // even if the destructor throws.
1404
1405 // Derive the complete-object pointer, which is what we need
1406 // to pass to the deallocation function.
1407 llvm::Value *completePtr =
1408 CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
1409
1410 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1411 completePtr, OperatorDelete,
1412 ElementType);
1413 }
1414
1415 // FIXME: Provide a source location here.
1416 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1417 CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
1418 SourceLocation(),
1419 ReturnValueSlot(), Ptr);
1420
1421 if (UseGlobalDelete) {
1422 CGF.PopCleanupBlock();
1423 }
1424
1425 return;
1426 }
1427 }
1428 }
1429
1430 // Make sure that we call delete even if the dtor throws.
1431 // This doesn't have to be a conditional cleanup because we're going
1432 // to pop it off in a second.
1433 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1434 Ptr, OperatorDelete, ElementType);
1435
1436 if (Dtor)
1437 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1438 /*ForVirtualBase=*/false,
1439 /*Delegating=*/false,
1440 Ptr);
1441 else if (CGF.getLangOpts().ObjCAutoRefCount &&
1442 ElementType->isObjCLifetimeType()) {
1443 switch (ElementType.getObjCLifetime()) {
1444 case Qualifiers::OCL_None:
1445 case Qualifiers::OCL_ExplicitNone:
1446 case Qualifiers::OCL_Autoreleasing:
1447 break;
1448
1449 case Qualifiers::OCL_Strong: {
1450 // Load the pointer value.
1451 llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1452 ElementType.isVolatileQualified());
1453
1454 CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
1455 break;
1456 }
1457
1458 case Qualifiers::OCL_Weak:
1459 CGF.EmitARCDestroyWeak(Ptr);
1460 break;
1461 }
1462 }
1463
1464 CGF.PopCleanupBlock();
1465 }
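// Illustrative example (hypothetical names): for
//   struct T { virtual ~T(); };
//   void f(T *p) { delete p; }
// the virtual path above calls the deleting destructor through the vtable
// (which frees the storage itself), while the non-virtual path calls the
// complete-object destructor and then 'operator delete', with the delete
// registered as a cleanup so it still runs if the destructor throws.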
1466
1467 namespace {
1468 /// Calls the given 'operator delete' on an array of objects.
1469 struct CallArrayDelete : EHScopeStack::Cleanup {
1470 llvm::Value *Ptr;
1471 const FunctionDecl *OperatorDelete;
1472 llvm::Value *NumElements;
1473 QualType ElementType;
1474 CharUnits CookieSize;
1475
1476 CallArrayDelete(llvm::Value *Ptr,
1477 const FunctionDecl *OperatorDelete,
1478 llvm::Value *NumElements,
1479 QualType ElementType,
1480 CharUnits CookieSize)
1481 : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1482 ElementType(ElementType), CookieSize(CookieSize) {}
1483
1484 void Emit(CodeGenFunction &CGF, Flags flags) {
1485 const FunctionProtoType *DeleteFTy =
1486 OperatorDelete->getType()->getAs<FunctionProtoType>();
1487 assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1488
1489 CallArgList Args;
1490
1491 // Pass the pointer as the first argument.
1492 QualType VoidPtrTy = DeleteFTy->getArgType(0);
1493 llvm::Value *DeletePtr
1494 = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1495 Args.add(RValue::get(DeletePtr), VoidPtrTy);
1496
1497 // Pass the original requested size as the second argument.
1498 if (DeleteFTy->getNumArgs() == 2) {
1499 QualType size_t = DeleteFTy->getArgType(1);
1500 llvm::IntegerType *SizeTy
1501 = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1502
1503 CharUnits ElementTypeSize =
1504 CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1505
1506 // The size of an element, multiplied by the number of elements.
1507 llvm::Value *Size
1508 = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1509 Size = CGF.Builder.CreateMul(Size, NumElements);
1510
1511 // Plus the size of the cookie if applicable.
1512 if (!CookieSize.isZero()) {
1513 llvm::Value *CookieSizeV
1514 = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1515 Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1516 }
1517
1518 Args.add(RValue::get(Size), size_t);
1519 }
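// E.g. (illustrative, assuming an Itanium-style array cookie): for 'new T[n]'
// with sizeof(T) == 8 and an 8-byte cookie, the size passed to the
// two-argument 'operator delete[]' is 8 * n + 8.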
1520
1521 // Emit the call to delete.
1522 CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
1523 CGF.CGM.GetAddrOfFunction(OperatorDelete),
1524 ReturnValueSlot(), Args, OperatorDelete);
1525 }
1526 };
1527 }
1528
1529 /// Emit the code for deleting an array of objects.
1530 static void EmitArrayDelete(CodeGenFunction &CGF,
1531 const CXXDeleteExpr *E,
1532 llvm::Value *deletedPtr,
1533 QualType elementType) {
1534 llvm::Value *numElements = 0;
1535 llvm::Value *allocatedPtr = 0;
1536 CharUnits cookieSize;
1537 CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1538 numElements, allocatedPtr, cookieSize);
1539
1540 assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1541
1542 // Make sure that we call delete even if one of the dtors throws.
1543 const FunctionDecl *operatorDelete = E->getOperatorDelete();
1544 CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1545 allocatedPtr, operatorDelete,
1546 numElements, elementType,
1547 cookieSize);
1548
1549 // Destroy the elements.
1550 if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1551 assert(numElements && "no element count for a type with a destructor!");
1552
1553 llvm::Value *arrayEnd =
1554 CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
1555
1556 // Note that it is legal to allocate a zero-length array, and we
1557 // can never fold the check away because the length should always
1558 // come from a cookie.
1559 CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
1560 CGF.getDestroyer(dtorKind),
1561 /*checkZeroLength*/ true,
1562 CGF.needsEHCleanup(dtorKind));
1563 }
1564
1565 // Pop the cleanup block.
1566 CGF.PopCleanupBlock();
1567 }
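// Illustrative example (hypothetical type): for
//   struct T { ~T(); };
//   void g(T *p) { delete[] p; }
// ReadArrayCookie recovers the element count and the original allocation
// pointer (which precedes 'p' by the cookie size); the elements are destroyed
// first, and the pushed cleanup then frees the original allocation even if a
// destructor throws.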
1568
1569 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1570 const Expr *Arg = E->getArgument();
1571 llvm::Value *Ptr = EmitScalarExpr(Arg);
1572
1573 // Null check the pointer.
1574 llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1575 llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1576
1577 llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1578
1579 Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1580 EmitBlock(DeleteNotNull);
1581
1582 // We might be deleting a pointer to array. If so, GEP down to the
1583 // first non-array element.
1584 // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1585 QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1586 if (DeleteTy->isConstantArrayType()) {
1587 llvm::Value *Zero = Builder.getInt32(0);
1588 SmallVector<llvm::Value*,8> GEP;
1589
1590 GEP.push_back(Zero); // point at the outermost array
1591
1592 // For each layer of array type we're pointing at:
1593 while (const ConstantArrayType *Arr
1594 = getContext().getAsConstantArrayType(DeleteTy)) {
1595 // 1. Unpeel the array type.
1596 DeleteTy = Arr->getElementType();
1597
1598 // 2. GEP to the first element of the array.
1599 GEP.push_back(Zero);
1600 }
1601
1602 Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
1603 }
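// E.g. (illustrative): if Ptr has IR type [3 x [7 x %A]]* (pointee type
// A[3][7]), the indices collected above are {0, 0, 0}, and the GEP yields an
// %A* to the first scalar element.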
1604
1605 assert(ConvertTypeForMem(DeleteTy) ==
1606 cast<llvm::PointerType>(Ptr->getType())->getElementType());
1607
1608 if (E->isArrayForm()) {
1609 EmitArrayDelete(*this, E, Ptr, DeleteTy);
1610 } else {
1611 EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
1612 E->isGlobalDelete());
1613 }
1614
1615 EmitBlock(DeleteEnd);
1616 }
1617
1618 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1619 // void __cxa_bad_typeid();
1620 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1621
1622 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1623 }
1624
1625 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1626 llvm::Value *Fn = getBadTypeidFn(CGF);
1627 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1628 CGF.Builder.CreateUnreachable();
1629 }
1630
1631 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
1632 const Expr *E,
1633 llvm::Type *StdTypeInfoPtrTy) {
1634 // Get the vtable pointer.
1635 llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
1636
1637 // C++ [expr.typeid]p2:
1638 // If the glvalue expression is obtained by applying the unary * operator to
1639 // a pointer and the pointer is a null pointer value, the typeid expression
1640 // throws the std::bad_typeid exception.
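// E.g. (illustrative):
//   Base *p = 0;          // Base is some polymorphic class
//   typeid(*p);           // must throw std::bad_typeid rather than crash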
1641 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
1642 if (UO->getOpcode() == UO_Deref) {
1643 llvm::BasicBlock *BadTypeidBlock =
1644 CGF.createBasicBlock("typeid.bad_typeid");
1645 llvm::BasicBlock *EndBlock =
1646 CGF.createBasicBlock("typeid.end");
1647
1648 llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
1649 CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1650
1651 CGF.EmitBlock(BadTypeidBlock);
1652 EmitBadTypeidCall(CGF);
1653 CGF.EmitBlock(EndBlock);
1654 }
1655 }
1656
1657 llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
1658 StdTypeInfoPtrTy->getPointerTo());
1659
1660 // Load the type info.
1661 Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1662 return CGF.Builder.CreateLoad(Value);
1663 }
1664
1665 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1666 llvm::Type *StdTypeInfoPtrTy =
1667 ConvertType(E->getType())->getPointerTo();
1668
1669 if (E->isTypeOperand()) {
1670 llvm::Constant *TypeInfo =
1671 CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1672 return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1673 }
1674
1675 // C++ [expr.typeid]p2:
1676 // When typeid is applied to a glvalue expression whose type is a
1677 // polymorphic class type, the result refers to a std::type_info object
1678 // representing the type of the most derived object (that is, the dynamic
1679 // type) to which the glvalue refers.
1680 if (E->isPotentiallyEvaluated())
1681 return EmitTypeidFromVTable(*this, E->getExprOperand(),
1682 StdTypeInfoPtrTy);
1683
1684 QualType OperandTy = E->getExprOperand()->getType();
1685 return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1686 StdTypeInfoPtrTy);
1687 }
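// Illustrative examples (hypothetical 'Poly' is a polymorphic class):
//   typeid(int)       -> address of the static std::type_info for 'int'
//   typeid(*polyPtr)  -> type_info loaded from the object's vtable above,
//                        reflecting the dynamic type of *polyPtr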
1688
1689 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1690 // void *__dynamic_cast(const void *sub,
1691 // const abi::__class_type_info *src,
1692 // const abi::__class_type_info *dst,
1693 // std::ptrdiff_t src2dst_offset);
1694
1695 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1696 llvm::Type *PtrDiffTy =
1697 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1698
1699 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1700
1701 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1702
1703 // Mark the function as nounwind readonly.
1704 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1705 llvm::Attribute::ReadOnly };
1706 llvm::AttributeSet Attrs = llvm::AttributeSet::get(
1707 CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
1708
1709 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1710 }
1711
1712 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1713 // void __cxa_bad_cast();
1714 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1715 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1716 }
1717
1718 static void EmitBadCastCall(CodeGenFunction &CGF) {
1719 llvm::Value *Fn = getBadCastFn(CGF);
1720 CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1721 CGF.Builder.CreateUnreachable();
1722 }
1723
1724 /// \brief Compute the src2dst_offset hint as described in the
1725 /// Itanium C++ ABI [2.9.7]
1726 static CharUnits computeOffsetHint(ASTContext &Context,
1727 const CXXRecordDecl *Src,
1728 const CXXRecordDecl *Dst) {
1729 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1730 /*DetectVirtual=*/false);
1731
1732 // If Dst is not derived from Src we can skip the whole computation below and
1733 // return that Src is not a public base of Dst. Record all inheritance paths.
1734 if (!Dst->isDerivedFrom(Src, Paths))
1735 return CharUnits::fromQuantity(-2ULL);
1736
1737 unsigned NumPublicPaths = 0;
1738 CharUnits Offset;
1739
1740 // Now walk all possible inheritance paths.
1741 for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
1742 I != E; ++I) {
1743 if (I->Access != AS_public) // Ignore non-public inheritance.
1744 continue;
1745
1746 ++NumPublicPaths;
1747
1748 for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
1749 // If the path contains a virtual base class we can't give any hint.
1750 // -1: no hint.
1751 if (J->Base->isVirtual())
1752 return CharUnits::fromQuantity(-1ULL);
1753
1754 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1755 continue;
1756
1757 // Accumulate the base class offsets.
1758 const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
1759 Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
1760 }
1761 }
1762
1763 // -2: Src is not a public base of Dst.
1764 if (NumPublicPaths == 0)
1765 return CharUnits::fromQuantity(-2ULL);
1766
1767 // -3: Src is a multiple public base type but never a virtual base type.
1768 if (NumPublicPaths > 1)
1769 return CharUnits::fromQuantity(-3ULL);
1770
1771 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1772 // Return the offset of Src from the origin of Dst.
1773 return Offset;
1774 }
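// Illustrative hint values for a cast whose source static type is A
// (hypothetical hierarchy):
//   struct A {}; struct B : public A {};            // dest B: hint = offset of A in B
//   struct C : private A {};                        // dest C: hint = -2 (no public path)
//   struct D : public virtual A {};                 // dest D: hint = -1 (virtual base on path)
//   struct E : public A {};
//   struct F : public A, public E {};               // dest F: hint = -3 (A is a public base twice)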
1775
1776 static llvm::Value *
1777 EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
1778 QualType SrcTy, QualType DestTy,
1779 llvm::BasicBlock *CastEnd) {
1780 llvm::Type *PtrDiffLTy =
1781 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1782 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1783
1784 if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
1785 if (PTy->getPointeeType()->isVoidType()) {
1786 // C++ [expr.dynamic.cast]p7:
1787 // If T is "pointer to cv void," then the result is a pointer to the
1788 // most derived object pointed to by v.
1789
1790 // Get the vtable pointer.
1791 llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
1792
1793 // Get the offset-to-top from the vtable.
1794 llvm::Value *OffsetToTop =
1795 CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1796 OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
1797
1798 // Finally, add the offset to the pointer.
1799 Value = CGF.EmitCastToVoidPtr(Value);
1800 Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1801
1802 return CGF.Builder.CreateBitCast(Value, DestLTy);
1803 }
1804 }
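// Rough shape of the void* path above (names are illustrative, not exact IR):
//   %vtable = load the object's vptr as ptrdiff_t*
//   %off    = load at vtable index -2        ; offset-to-top
//   %result = gep i8* %obj, %off             ; pointer to the most derived object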
1805
1806 QualType SrcRecordTy;
1807 QualType DestRecordTy;
1808
1809 if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
1810 SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1811 DestRecordTy = DestPTy->getPointeeType();
1812 } else {
1813 SrcRecordTy = SrcTy;
1814 DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1815 }
1816
1817 assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1818 assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
1819
1820 llvm::Value *SrcRTTI =
1821 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1822 llvm::Value *DestRTTI =
1823 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1824
1825 // Compute the offset hint.
1826 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1827 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1828 llvm::Value *OffsetHint =
1829 llvm::ConstantInt::get(PtrDiffLTy,
1830 computeOffsetHint(CGF.getContext(), SrcDecl,
1831 DestDecl).getQuantity());
1832
1833 // Emit the call to __dynamic_cast.
1834 Value = CGF.EmitCastToVoidPtr(Value);
1835
1836 llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
1837 Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
1838 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1839
1840 /// C++ [expr.dynamic.cast]p9:
1841 /// A failed cast to reference type throws std::bad_cast
1842 if (DestTy->isReferenceType()) {
1843 llvm::BasicBlock *BadCastBlock =
1844 CGF.createBasicBlock("dynamic_cast.bad_cast");
1845
1846 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1847 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1848
1849 CGF.EmitBlock(BadCastBlock);
1850 EmitBadCastCall(CGF);
1851 }
1852
1853 return Value;
1854 }
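// Illustrative lowering (names hypothetical): 'dynamic_cast<D&>(b)' becomes
// roughly
//   %r = call i8* @__dynamic_cast(i8* %b.addr, %rtti.B, %rtti.D, hint)
//   if (%r == null) call @__cxa_bad_cast()   ; a failed reference cast throws
// whereas for 'dynamic_cast<D*>(p)' a null result is simply propagated.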
1855
1856 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1857 QualType DestTy) {
1858 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1859 if (DestTy->isPointerType())
1860 return llvm::Constant::getNullValue(DestLTy);
1861
1862 /// C++ [expr.dynamic.cast]p9:
1863 /// A failed cast to reference type throws std::bad_cast
1864 EmitBadCastCall(CGF);
1865
1866 CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1867 return llvm::UndefValue::get(DestLTy);
1868 }
1869
1870 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1871 const CXXDynamicCastExpr *DCE) {
1872 QualType DestTy = DCE->getTypeAsWritten();
1873
1874 if (DCE->isAlwaysNull())
1875 return EmitDynamicCastToNull(*this, DestTy);
1876
1877 QualType SrcTy = DCE->getSubExpr()->getType();
1878
1879 // C++ [expr.dynamic.cast]p4:
1880 // If the value of v is a null pointer value in the pointer case, the result
1881 // is the null pointer value of type T.
1882 bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1883
1884 llvm::BasicBlock *CastNull = 0;
1885 llvm::BasicBlock *CastNotNull = 0;
1886 llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1887
1888 if (ShouldNullCheckSrcValue) {
1889 CastNull = createBasicBlock("dynamic_cast.null");
1890 CastNotNull = createBasicBlock("dynamic_cast.notnull");
1891
1892 llvm::Value *IsNull = Builder.CreateIsNull(Value);
1893 Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1894 EmitBlock(CastNotNull);
1895 }
1896
1897 Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1898
1899 if (ShouldNullCheckSrcValue) {
1900 EmitBranch(CastEnd);
1901
1902 EmitBlock(CastNull);
1903 EmitBranch(CastEnd);
1904 }
1905
1906 EmitBlock(CastEnd);
1907
1908 if (ShouldNullCheckSrcValue) {
1909 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1910 PHI->addIncoming(Value, CastNotNull);
1911 PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1912
1913 Value = PHI;
1914 }
1915
1916 return Value;
1917 }
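// Sketch of the resulting control flow for the pointer form: the source value
// is null-checked; dynamic_cast.notnull runs the cast, dynamic_cast.null just
// branches on, and dynamic_cast.end merges the two with a PHI whose incoming
// values are the cast result and a null constant.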
1918
1919 void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1920 RunCleanupsScope Scope(*this);
1921 LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
1922 Slot.getAlignment());
1923
1924 CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1925 for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
1926 e = E->capture_init_end();
1927 i != e; ++i, ++CurField) {
1928 // Emit initialization
1929
1930 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1931 ArrayRef<VarDecl *> ArrayIndexes;
1932 if (CurField->getType()->isArrayType())
1933 ArrayIndexes = E->getCaptureInitIndexVars(i);
1934 EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1935 }
1936 }
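// Illustrative example (hypothetical captures): for
//   int x; int &y = ...;
//   auto l = [x, &y] { ... };
// the closure class has one field per capture, and the loop above emits the
// field initializations (copying 'x', binding the reference for 'y') in
// capture order into the aggregate slot.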
1937