//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
                                                     FPT->getExtInfo(),
                                                     required),
                  Callee, ReturnValue, Args, MD);
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether the virtual call on
/// the given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
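
  // For example (illustrative), a virtual call on a local variable of
  // complete record type can always be devirtualized:
  //
  //   void g() {
  //     B b;
  //     b.f();   // the object's dynamic type is known to be exactly B
  //   }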
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a variable declaration; if its type is a record type (not
      // a pointer or reference), we know the dynamic type exactly and can
      // devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
                                                 Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
                                                 cast<CXXConstructorDecl>(MD),
                                                 Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);

  llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
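  //
  // For example (illustrative): given 'struct B : A { ... };', the call
  // 'b->A::f()' names f through a qualifier, so it is emitted as a direct
  // call to A::f even when f is virtual.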
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                        && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                               ME->getBase(),
                                                               MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateCopy(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
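  // (Illustrative: under the Itanium C++ ABI a null pointer to data member
  // is represented as -1 rather than 0, which is why a plain zero memset
  // would not produce null member pointers here.)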
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      assert(0 && "Delegating constructor should not need zeroing");
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
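// (Illustrative: the array "cookie" is ABI-defined bookkeeping stored
// before the array elements.  Under the Itanium C++ ABI, 'new T[n]' for a
// T with a non-trivial destructor typically records 'n' immediately before
// the first element so that 'delete[]' can recover the element count.)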

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());
  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
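  //
  // For example (illustrative), with a 64-bit size_t: 'new int[n]' with
  // n == 2^62 makes n * sizeof(int) wrap around to 0, so the multiply must
  // be overflow-checked; and a signed n == -1 must fail outright rather
  // than being reinterpreted as SIZE_MAX.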
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.  But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);

    CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
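
  // Roughly (illustrative), for 'new T[n] { a, b }' this emits stores of
  // the explicit initializers a and b into the first two elements and
  // then, unless every element is known to be initialized, a fill loop:
  //
  //   cur = phi [ explicitPtr, entry ], [ next, loop ]
  //   ...initialize *cur from the array filler...
  //   next = cur + 1; br (next == end ? cont : loop)
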
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)) {
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      bool RequiresZeroInitialization = false;
      if (Ctor->isTrivial()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
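///
/// (Illustrative: for 'new (args...) T(x)', if T's constructor throws, the
/// matching placement 'operator delete' must be invoked with the same
/// placement arguments before the exception propagates; this cleanup
/// arranges exactly that.)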
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                             DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
                                                     allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
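  //
  // For example (illustrative): 'new (std::nothrow) T(...)' calls an
  // allocation function with a non-throwing exception specification, so it
  // may return null and the initialization below must be guarded.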
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
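  // (Illustrative: a usual deallocation function may be declared as
  // 'operator delete(void*, size_t)', in which case its second parameter
  // receives the size of the object being deleted.)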
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.
          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    Ptr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(
              CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
1378 CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1379 Ptr, OperatorDelete, ElementType);
1380
1381 if (Dtor)
1382 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1383 /*ForVirtualBase=*/false, Ptr);
1384 else if (CGF.getLangOpts().ObjCAutoRefCount &&
1385 ElementType->isObjCLifetimeType()) {
1386 switch (ElementType.getObjCLifetime()) {
1387 case Qualifiers::OCL_None:
1388 case Qualifiers::OCL_ExplicitNone:
1389 case Qualifiers::OCL_Autoreleasing:
1390 break;
1391
1392 case Qualifiers::OCL_Strong: {
1393 // Load the pointer value.
1394 llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1395 ElementType.isVolatileQualified());
1396
1397 CGF.EmitARCRelease(PtrValue, /*precise*/ true);
1398 break;
1399 }
1400
1401 case Qualifiers::OCL_Weak:
1402 CGF.EmitARCDestroyWeak(Ptr);
1403 break;
1404 }
1405 }
1406
1407 CGF.PopCleanupBlock();
1408 }
1409
1410 namespace {
1411 /// Calls the given 'operator delete' on an array of objects.
1412 struct CallArrayDelete : EHScopeStack::Cleanup {
1413 llvm::Value *Ptr;
1414 const FunctionDecl *OperatorDelete;
1415 llvm::Value *NumElements;
1416 QualType ElementType;
1417 CharUnits CookieSize;
1418
CallArrayDelete__anon7f54e8600311::CallArrayDelete1419 CallArrayDelete(llvm::Value *Ptr,
1420 const FunctionDecl *OperatorDelete,
1421 llvm::Value *NumElements,
1422 QualType ElementType,
1423 CharUnits CookieSize)
1424 : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1425 ElementType(ElementType), CookieSize(CookieSize) {}
1426
Emit__anon7f54e8600311::CallArrayDelete1427 void Emit(CodeGenFunction &CGF, Flags flags) {
1428 const FunctionProtoType *DeleteFTy =
1429 OperatorDelete->getType()->getAs<FunctionProtoType>();
1430 assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1431
1432 CallArgList Args;
1433
1434 // Pass the pointer as the first argument.
1435 QualType VoidPtrTy = DeleteFTy->getArgType(0);
1436 llvm::Value *DeletePtr
1437 = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1438 Args.add(RValue::get(DeletePtr), VoidPtrTy);
1439
1440 // Pass the original requested size as the second argument.
1441 if (DeleteFTy->getNumArgs() == 2) {
1442 QualType size_t = DeleteFTy->getArgType(1);
1443 llvm::IntegerType *SizeTy
1444 = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1445
1446 CharUnits ElementTypeSize =
1447 CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1448
1449 // The size of an element, multiplied by the number of elements.
1450 llvm::Value *Size
1451 = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1452 Size = CGF.Builder.CreateMul(Size, NumElements);
1453
1454 // Plus the size of the cookie if applicable.
1455 if (!CookieSize.isZero()) {
1456 llvm::Value *CookieSizeV
1457 = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1458 Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1459 }
1460
1461 Args.add(RValue::get(Size), size_t);
1462 }
1463
1464 // Emit the call to delete.
1465 CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
1466 CGF.CGM.GetAddrOfFunction(OperatorDelete),
1467 ReturnValueSlot(), Args, OperatorDelete);
1468 }
1469 };
1470 }

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
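
// Sketch of the overall shape (illustrative): for
//
//   A *p = new A[n];   // A has a non-trivial destructor
//   delete [] p;
//
// the cookie read recovers n and the true allocation start, the elements in
// [p, p+n) are destroyed (in reverse construction order, as C++ requires),
// and the cleanup pushed above guarantees operator delete[] still runs if a
// destructor throws.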

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}
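
// Worked example (illustrative): given
//
//   A (*p)[3][7] = ...;
//   delete [] p;
//
// DeleteTy unpeels to 'A' and the GEP built above is
//
//   getelementptr inbounds [3 x [7 x %A]]* %p, i32 0, i32 0, i32 0
//
// so the array-delete path only ever sees a pointer to the element type 'A'.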

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator
  //   to a pointer and the pointer is a null pointer value, the typeid
  //   expression throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}
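
// Itanium C++ ABI layout assumed here (noted for illustration): the
// std::type_info pointer occupies the vtable slot just before the address
// point, hence the GEP to index -1 above. So for
//
//   struct A { virtual ~A(); };
//   const std::type_info &ti = typeid(*p);   // p is A*
//
// the emitted code null-checks p (calling __cxa_bad_typeid if it is null),
// loads the vtable pointer, and loads the slot at index -1.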

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
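
// Summary of the three paths by example (not from the original source):
//
//   typeid(int)        // type operand: constant RTTI descriptor, no code
//   typeid(nonPoly)    // glvalue of non-polymorphic type: static descriptor
//   typeid(*polyPtr)   // glvalue of polymorphic type: loaded from the
//                      // vtable at runtime via EmitTypeidFromVTable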

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
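
// Illustrative cases for the paths above (example class names assumed):
//
//   dynamic_cast<void*>(b)   // no runtime call; adds the offset-to-top
//                            // entry (vtable slot -2) to find the most
//                            // derived object
//   dynamic_cast<D*>(b)      // calls __dynamic_cast; null means failure
//   dynamic_cast<D&>(*b)     // likewise, but a null result branches to
//                            // __cxa_bad_cast rather than falling through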

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
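
// Rough shape of the emitted IR for the null-checked pointer form (a sketch,
// not exact compiler output):
//
//   %isnull = icmp eq %class.B* %b, null
//   br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
//
// dynamic_cast.notnull:                     ; __dynamic_cast + bitcast
//   br label %dynamic_cast.end
// dynamic_cast.null:
//   br label %dynamic_cast.end
// dynamic_cast.end:
//   %v = phi %class.D* [ %cast, %dynamic_cast.notnull ],
//                      [ null, %dynamic_cast.null ]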

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture into its corresponding
    // closure field.
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
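
// Example mapping (illustrative): for
//
//   int x = 0; int a[2] = {1, 2};
//   auto l = [x, a] { return x + a[0]; };
//
// the closure class has one field per capture; the loop above initializes
// the 'x' field by copy and the 'a' field element-by-element, using the
// index variables Sema recorded for the array capture.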