//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *ImplicitParam,
                                          QualType ImplicitParamTy,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
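  //
  // For example (an illustrative sketch, not part of the standard text):
  //
  //   struct X { int n; void f() { n = 0; } };
  //   struct Y { } y;
  //
  //   reinterpret_cast<X*>(&y)->f();  // undefined behavior; the check
  //                                   // emitted below can diagnose this
  //                                   // when sanitizers are enabled.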
  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                            : TCK_MemberCall,
                CallLoc, This, getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether the virtual call on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and we can therefore devirtualize calls to it.
  if (MD->hasAttr<FinalAttr>())
    return true;
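
  // For example (an illustrative sketch):
  //
  //   struct A { virtual void f(); };
  //   struct B : A { void f() final; };
  //
  //   void g(B *b) {
  //     b->f();  // must call B::f, so the call can be made direct
  //   }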

  // Similarly, if the class itself is marked 'final', it can't be derived
  // from, so no override can exist and we can therefore devirtualize the
  // member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
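  //
  // For example (an illustrative sketch): in
  //
  //   struct S { A a; };
  //   void g(S *s) { s->a.f(); }
  //
  // the member 'a' is known to be exactly an A, so a virtual A::f can be
  // called directly.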
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = 0;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete);
  else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
                                                             Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record
  // type because then we know what the type is.
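  //
  // For example (an illustrative sketch): given 'struct D : B { ... };',
  // a call written as 'd->B::f()' is dispatched directly to B::f even if
  // 'f' is virtual and overridden in D.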
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                CE->getExprLoc(), This);
    } else {
      if (getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                        /*ImplicitParam=*/0, QualType(), 0, 0);
    }
    return RValue::get(0);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr,
                                                    MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination
  // is already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
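
// An array cookie is the ABI's bookkeeping slot for the element count of a
// dynamically allocated array. For example (an illustrative sketch of the
// common Itanium C++ ABI layout; the exact rules live in the CGCXXABI
// implementations):
//
//   p = new T[n];   // operator new[] receives n * sizeof(T) + cookie
//
//   +--------+--------------------------+
//   | cookie | T[0] T[1] ... T[n-1]     |
//   +--------+--------------------------+
//            ^-- pointer produced by the new-expression
//
// so that 'delete[] p' can recover how many destructors to run. The
// reserved placement form 'new (buf) T[n]' is exempt, as checked above.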

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
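  //
  // For example (an illustrative sketch): for 'new int[n]' with
  // 'int n = -1', the allocation must fail even though
  // (size_t)n * sizeof(int) plus the cookie could wrap around to a small,
  // apparently valid value.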
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers.  If we do, that's treated like an overflow.
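    //
    // For example (an illustrative sketch): 'new int[n]{1, 2, 3}' where n
    // evaluates to 1 at runtime must fail, since three initializers cannot
    // fit in one element.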
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.  But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                          llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1,
                                               "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      if (Ctor->isTrivial()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     CCE->requiresZeroInitialization());
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *Callee,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
                   CalleeAddr, ReturnValueSlot(), Args,
                   Callee, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
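  ///
  /// For example (an illustrative sketch): in 'delete new int(0);' the
  /// paired allocation and deallocation calls may be removed entirely once
  /// they carry the 'builtin' attribute.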
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
  if (Callee->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
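      // For example (an illustrative sketch), a class may declare
      //
      //   void operator delete(void *p, std::size_t sz);
      //
      // in which case the allocated size is passed as that second argument.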
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
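  ///
  /// For example (an illustrative sketch): in
  ///
  ///   T *p = cond ? new T(f()) : 0;
  ///
  /// the new-expression is only conditionally evaluated, so the values the
  /// cleanup needs must be saved in a dominating position first.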
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                             DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
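  //
  // For example (an illustrative sketch): in 'new (std::nothrow) T()' the
  // allocator may return null, so T's constructor must only run on the
  // not-null path.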
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS = allocation->getType()->getPointerAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1401
1402 /// Emit the code for deleting a single object.
EmitObjectDelete(CodeGenFunction & CGF,const FunctionDecl * OperatorDelete,llvm::Value * Ptr,QualType ElementType,bool UseGlobalDelete)1403 static void EmitObjectDelete(CodeGenFunction &CGF,
1404 const FunctionDecl *OperatorDelete,
1405 llvm::Value *Ptr,
1406 QualType ElementType,
1407 bool UseGlobalDelete) {
1408 // Find the destructor for the type, if applicable. If the
1409 // destructor is virtual, we'll just emit the vcall and return.
1410 const CXXDestructorDecl *Dtor = 0;
1411 if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1412 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1413 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1414 Dtor = RD->getDestructor();
1415
1416 if (Dtor->isVirtual()) {
1417 if (UseGlobalDelete) {
1418 // If we're supposed to call the global delete, make sure we do so
1419 // even if the destructor throws.
1420
1421 // Derive the complete-object pointer, which is what we need
1422 // to pass to the deallocation function.
1423 llvm::Value *completePtr =
1424 CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
1425
1426 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1427 completePtr, OperatorDelete,
1428 ElementType);
1429 }
1430
1431 // FIXME: Provide a source location here.
1432 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1433 CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
1434 SourceLocation(), Ptr);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
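    // Deleting a pointer to an ARC-qualified object must also end the
    // pointee's lifetime; e.g. (illustrative) for '__strong id *p; delete p;'
    // the stored object is released before the storage is freed.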
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
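      // For a two-argument 'operator delete[]' this is the whole allocation,
      // roughly: sizeof(element) * numElements + cookieSize.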
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
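
  // With the common Itanium-style cookie, the allocation looks roughly like
  //   [ element count ][ elt0, elt1, ... ]
  // where deletedPtr points at elt0 and allocatedPtr at the cookie, so the
  // cookie size has to be added back when computing the size passed to
  // operator delete[].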

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
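  // E.g. (illustrative) for 'A (*p)[3][7]; delete p;' this emits roughly
  //   %del.first = getelementptr inbounds [3 x [7 x %A]]* %p,
  //                                       i32 0, i32 0, i32 0
  // yielding an %A*, while DeleteTy unpeels from A[3][7] down to A.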
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
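  // In the Itanium ABI the std::type_info pointer lives one slot before the
  // vtable's address point, hence the GEP by -1 below.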
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
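/// As an illustrative summary, the value returned below is either the static
/// offset of Src within Dst or one of these special values:
///   -1: no hint (some path goes through a virtual base),
///   -2: Src is not a public base of Dst,
///   -3: Src is a multiple public non-virtual base of Dst.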
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Note that isDerivedFrom also
  // records all inheritance paths in Paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
       I != E; ++I) {
    if (I->Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (J->Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
      Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
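      // In the Itanium ABI the offset-to-top field lives two slots before the
      // vtable's address point, hence the GEP by -2 below.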
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint =
    llvm::ConstantInt::get(PtrDiffLTy,
                           computeOffsetHint(CGF.getContext(), SrcDecl,
                                             DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
  Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }
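
  // For the null-checked case the emitted CFG is a diamond (illustrative):
  //   entry:   br i1 %isnull, label %dynamic_cast.null,
  //                           label %dynamic_cast.notnull
  //   notnull: the cast itself, then br label %dynamic_cast.end
  //   null:    br label %dynamic_cast.end
  //   end:     phi of the cast result and a null constant (built below)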

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
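    // (Illustrative: for '[x, &y] { ... }' the closure class has two fields;
    //  this copy-initializes the field for 'x' and binds the field for 'y'
    //  to the enclosing variable.)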

    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
