//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

static RequiredArgs
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
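  //
  // For illustration (hypothetical types): given 'struct X { void f(); };',
  // a call like 'reinterpret_cast<X*>(unrelated)->f()' is the undefined case
  // the type check below can diagnose when sanitizer checks are enabled.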
  SourceLocation CallLoc;
  if (CE)
    CallLoc = CE->getExprLoc();
  CGF.EmitTypeCheck(
      isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
                                  : CodeGenFunction::TCK_MemberCall,
      CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);

  // And the rest of the call args.
  if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
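    // For illustration: a member 'operator+=' call 'a += b' is modeled as
    // operator+=(a, b); 'a' was already pushed above as 'this', so only 'b'
    // remains to be emitted.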
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return required;
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args);
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    const CXXDestructorDecl *DD, llvm::Value *Callee, llvm::Value *This,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
    StructorType Type) {
  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
                  Callee, ReturnValueSlot(), Args, DD);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has it as a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is neither the best dynamic
      // one nor the class of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }
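
  // For illustration (hypothetical types): given 'struct B { virtual void f(); };'
  // and 'struct D : B { void f() override; };', a call 'd.f()' on a local
  // 'D d;' has a known dynamic type, so it can be lowered to a direct call to
  // D::f instead of a load through the vtable.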

  Address This = Address::invalid();
  if (IsArrow)
    This = EmitPointerWithAlignment(Base);
  else
    This = EmitLValue(Base).getAddress();


  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(nullptr);

    if (!MD->getParent()->mayInsertExtraPadding()) {
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
        // We don't like to generate the trivial copy/move assignment operator
        // when it isn't necessary; just produce the proper effect here.
        // Special case: skip first argument of CXXOperatorCall (it is "this").
        unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
        Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
        EmitAggregateAssign(This, RHS, CE->getType());
        return RValue::get(This.getPointer());
      }

      if (isa<CXXConstructorDecl>(MD) &&
          cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
        // Trivial move and copy ctors are the same.
        assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
        Address RHS = EmitLValue(*CE->arg_begin()).getAddress();
        EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType());
        return RValue::get(This.getPointer());
      }
      llvm_unreachable("unknown trivial member function");
    }
  }
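
  // For illustration: for a type with a trivial copy-assignment operator,
  // 'a = b' is emitted above as a direct aggregate (memcpy-style) assignment
  // rather than as an actual call to the operator.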

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Dtor, StructorType::Complete);
  else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        Ctor, StructorType::Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(
          *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
    } else {
      if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
                                  /*ImplicitParam=*/nullptr, QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
                                                       CE->getLocStart());
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent());
      EmitVTablePtrCheckForCall(MD->getParent(), VTable, CFITCK_NVCall,
                                CE->getLocStart());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  if (MD->isVirtual()) {
    This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
        *this, CalleeDecl, This, UseVirtualCall);
  }

  return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
                                     /*ImplicitParam=*/nullptr, QualType(), CE);
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();
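  // For illustration: in '(p->*pmf)(x)' the callee is a BinaryOperator with
  // opcode BO_PtrMemI, BaseExpr is 'p', and MemFnExpr is 'pmf'; for
  // '(obj.*pmf)(x)' the opcode is BO_PtrMemD.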

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                             ThisPtrForCall, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required =
      RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }
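
  // For illustration: on a 64-bit MSVC-style target with a vbptr at offset 8
  // and an NVSize of 24, the single store covering [0, 24) is split into
  // [0, 8) and [16, 24), leaving the 8-byte vbptr untouched.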

  // If the type contains a pointer to data member, we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
                               DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getQuantity());

    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }
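
  // For illustration: in 'X x = makeX();' the construction of 'x' from the
  // returned temporary is elidable, so above we emit the argument's aggregate
  // value directly into the destination slot instead of calling a copy/move
  // constructor.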

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E);
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating,
                           Dest.getAddress(), E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME: Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
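
// For illustration: under the Itanium C++ ABI, 'new T[n]' for a T with a
// non-trivial destructor typically reserves a size_t-sized cookie in front of
// the array to record 'n', so that 'delete[]' knows how many destructors to
// run; the ABI object computes the exact padding here.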

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the sizes of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }
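
  // For illustration: for 'new int[n][2][3]', 'type' ends up as 'int' and
  // arraySizeMultiplier is 6, so numElements is n scaled by 6 and the byte
  // size works out to n * 6 * sizeof(int).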

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }
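
    // For illustration: for 'new int[n][3]{{1, 2, 3}, {4, 5, 6}}', the init
    // list has 2 elements, each initializing an inner 'int[3]', so
    // InitListElements becomes 2 * 3 = 6 base elements.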

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };
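
  // For illustration: for 'new int[n]()' the elements are value-initialized
  // to zero, which is lowered via the lambda above to a single memset over
  // the already-overflow-checked allocation size instead of an element loop.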

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If the new-expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                       "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *Callee,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*chainCall=*/false),
                   CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
  if (Callee->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}
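
// For illustration: with the 'builtin' attribute in place, LLVM may treat a
// matched allocation/deallocation pair as elidable, e.g. removing both calls
// for 'delete new int;' when the allocation is otherwise unobservable.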

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const Expr *Arg,
                                                 bool IsDelete) {
  CallArgList Args;
  const Stmt *ArgS = Arg;
  EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS));
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}
1161  
1162  namespace {
1163    /// A cleanup to call the given 'operator delete' function upon
1164    /// abnormal exit from a new expression.
1165    class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1166      size_t NumPlacementArgs;
1167      const FunctionDecl *OperatorDelete;
1168      llvm::Value *Ptr;
1169      llvm::Value *AllocSize;
1170  
getPlacementArgs()1171      RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1172  
1173    public:
getExtraSize(size_t NumPlacementArgs)1174      static size_t getExtraSize(size_t NumPlacementArgs) {
1175        return NumPlacementArgs * sizeof(RValue);
1176      }
1177  
CallDeleteDuringNew(size_t NumPlacementArgs,const FunctionDecl * OperatorDelete,llvm::Value * Ptr,llvm::Value * AllocSize)1178      CallDeleteDuringNew(size_t NumPlacementArgs,
1179                          const FunctionDecl *OperatorDelete,
1180                          llvm::Value *Ptr,
1181                          llvm::Value *AllocSize)
1182        : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1183          Ptr(Ptr), AllocSize(AllocSize) {}
1184  
setPlacementArg(unsigned I,RValue Arg)1185      void setPlacementArg(unsigned I, RValue Arg) {
1186        assert(I < NumPlacementArgs && "index out of range");
1187        getPlacementArgs()[I] = Arg;
1188      }
1189  
1190      void Emit(CodeGenFunction &CGF, Flags flags) override {
1191        const FunctionProtoType *FPT
1192          = OperatorDelete->getType()->getAs<FunctionProtoType>();
1193        assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
1194               (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
1195  
1196        CallArgList DeleteArgs;
1197  
1198        // The first argument is always a void*.
1199        FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
1200        DeleteArgs.add(RValue::get(Ptr), *AI++);
1201  
1202        // A member 'operator delete' can take an extra 'size_t' argument.
1203        if (FPT->getNumParams() == NumPlacementArgs + 2)
1204          DeleteArgs.add(RValue::get(AllocSize), *AI++);
1205  
1206        // Pass the rest of the arguments, which must match exactly.
1207        for (unsigned I = 0; I != NumPlacementArgs; ++I)
1208          DeleteArgs.add(getPlacementArgs()[I], *AI++);
1209  
1210        // Call 'operator delete'.
1211        EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1212      }
1213    };
1214  
1215    /// A cleanup to call the given 'operator delete' function upon
1216    /// abnormal exit from a new expression when the new expression is
1217    /// conditional.
1218    class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
1219      size_t NumPlacementArgs;
1220      const FunctionDecl *OperatorDelete;
1221      DominatingValue<RValue>::saved_type Ptr;
1222      DominatingValue<RValue>::saved_type AllocSize;
1223  
1224      DominatingValue<RValue>::saved_type *getPlacementArgs() {
1225        return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1226      }
1227  
1228    public:
1229      static size_t getExtraSize(size_t NumPlacementArgs) {
1230        return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1231      }
1232  
1233      CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1234                                     const FunctionDecl *OperatorDelete,
1235                                     DominatingValue<RValue>::saved_type Ptr,
1236                                     DominatingValue<RValue>::saved_type AllocSize)
1237        : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1238          Ptr(Ptr), AllocSize(AllocSize) {}
1239  
1240      void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1241        assert(I < NumPlacementArgs && "index out of range");
1242        getPlacementArgs()[I] = Arg;
1243      }
1244  
1245      void Emit(CodeGenFunction &CGF, Flags flags) override {
1246        const FunctionProtoType *FPT
1247          = OperatorDelete->getType()->getAs<FunctionProtoType>();
1248        assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
1249               (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
1250  
1251        CallArgList DeleteArgs;
1252  
1253        // The first argument is always a void*.
1254        FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
1255        DeleteArgs.add(Ptr.restore(CGF), *AI++);
1256  
1257        // A member 'operator delete' can take an extra 'size_t' argument.
1258        if (FPT->getNumParams() == NumPlacementArgs + 2) {
1259          RValue RV = AllocSize.restore(CGF);
1260          DeleteArgs.add(RV, *AI++);
1261        }
1262  
1263        // Pass the rest of the arguments, which must match exactly.
1264        for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1265          RValue RV = getPlacementArgs()[I].restore(CGF);
1266          DeleteArgs.add(RV, *AI++);
1267        }
1268  
1269        // Call 'operator delete'.
1270        EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1271      }
1272    };
1273  }
1274  
1275  /// Enter a cleanup to call 'operator delete' if the initializer in a
1276  /// new-expression throws.
1277  static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1278                                    const CXXNewExpr *E,
1279                                    Address NewPtr,
1280                                    llvm::Value *AllocSize,
1281                                    const CallArgList &NewArgs) {
1282    // If we're not inside a conditional branch, then the cleanup will
1283    // dominate and we can do the easier (and more efficient) thing.
1284    if (!CGF.isInConditionalBranch()) {
1285      CallDeleteDuringNew *Cleanup = CGF.EHStack
1286        .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1287                                                   E->getNumPlacementArgs(),
1288                                                   E->getOperatorDelete(),
1289                                                   NewPtr.getPointer(),
1290                                                   AllocSize);
1291      for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1292        Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1293  
1294      return;
1295    }
1296  
1297    // Otherwise, we need to save all this stuff.
1298    DominatingValue<RValue>::saved_type SavedNewPtr =
1299      DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
1300    DominatingValue<RValue>::saved_type SavedAllocSize =
1301      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1302  
1303    CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1304      .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1305                                                   E->getNumPlacementArgs(),
1306                                                   E->getOperatorDelete(),
1307                                                   SavedNewPtr,
1308                                                   SavedAllocSize);
1309    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1310      Cleanup->setPlacementArg(I,
1311                       DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1312  
1313    CGF.initFullExprCleanup();
1314  }
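      // For illustration, with a hypothetical user-defined placement pair
      // (the 'Arena' names are invented for this sketch):
      //
      //   void *operator new(size_t, Arena &);   // placement allocator
      //   void operator delete(void *, Arena &); // matching deallocator
      //   S *s = new (arena) S();
      //
      // If S::S() throws, the cleanup entered above calls the matching
      // operator delete with the same placement argument 'arena'.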
1315  
1316  llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1317    // The element type being allocated.
1318    QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1319  
1320    // 1. Build a call to the allocation function.
1321    FunctionDecl *allocator = E->getOperatorNew();
1322  
1323    // If there is a brace-initializer, we cannot allocate fewer elements than inits.
1324    unsigned minElements = 0;
1325    if (E->isArray() && E->hasInitializer()) {
1326      if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1327        minElements = ILE->getNumInits();
1328    }
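        //   e.g. for 'new int[n]{1, 2, 3}' minElements is 3, so the size
        //   computation below must provide for at least three elements.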
1329  
1330    llvm::Value *numElements = nullptr;
1331    llvm::Value *allocSizeWithoutCookie = nullptr;
1332    llvm::Value *allocSize =
1333      EmitCXXNewAllocSize(*this, E, minElements, numElements,
1334                          allocSizeWithoutCookie);
1335  
1336    // Emit the allocation call.  If the allocator is a global placement
1337    // operator, just "inline" it directly.
1338    Address allocation = Address::invalid();
1339    CallArgList allocatorArgs;
1340    if (allocator->isReservedGlobalPlacementOperator()) {
1341      assert(E->getNumPlacementArgs() == 1);
1342      const Expr *arg = *E->placement_arguments().begin();
1343  
1344      AlignmentSource alignSource;
1345      allocation = EmitPointerWithAlignment(arg, &alignSource);
1346  
1347      // The pointer expression will, in many cases, be an opaque void*.
1348      // In these cases, discard the computed alignment and use the
1349      // formal alignment of the allocated type.
1350      if (alignSource != AlignmentSource::Decl) {
1351        allocation = Address(allocation.getPointer(),
1352                             getContext().getTypeAlignInChars(allocType));
1353      }
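          // (The reserved form is the standard 'new (ptr) T' spelling, i.e.
          // '::operator new(size_t, void*)', which simply returns its pointer
          // argument; that is why the argument itself is the allocation here.)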
1354  
1355      // Set up allocatorArgs for the call to operator delete if it's not
1356      // the reserved global operator.
1357      if (E->getOperatorDelete() &&
1358          !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1359        allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
1360        allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
1361      }
1362  
1363    } else {
1364      const FunctionProtoType *allocatorType =
1365        allocator->getType()->castAs<FunctionProtoType>();
1366  
1367      // The allocation size is the first argument.
1368      QualType sizeType = getContext().getSizeType();
1369      allocatorArgs.add(RValue::get(allocSize), sizeType);
1370  
1371      // We start at 1 here because the first argument (the allocation size)
1372      // has already been emitted.
1373      EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1374                   /* CalleeDecl */ nullptr,
1375                   /*ParamsToSkip*/ 1);
1376  
1377      RValue RV =
1378        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1379  
1380      // For now, only assume that the allocation function returns
1381      // something satisfactorily aligned for the element type, plus
1382      // the cookie if we have one.
1383      CharUnits allocationAlign =
1384        getContext().getTypeAlignInChars(allocType);
1385      if (allocSize != allocSizeWithoutCookie) {
1386        CharUnits cookieAlign = getSizeAlign(); // FIXME?
1387        allocationAlign = std::max(allocationAlign, cookieAlign);
1388      }
1389  
1390      allocation = Address(RV.getScalarVal(), allocationAlign);
1391    }
1392  
1393    // Emit a null check on the allocation result if the allocation
1394    // function is allowed to return null (because it has a non-throwing
1395    // exception spec or is the reserved placement new) and we have an
1396    // interesting initializer.
1397    bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
1398      (!allocType.isPODType(getContext()) || E->hasInitializer());
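        // (e.g. 'new (std::nothrow) S(...)' for a non-POD S takes the checked
        // path; a plain throwing 'new S' needs no branch.)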
1399  
1400    llvm::BasicBlock *nullCheckBB = nullptr;
1401    llvm::BasicBlock *contBB = nullptr;
1402  
1403    // The null-check means that the initializer is conditionally
1404    // evaluated.
1405    ConditionalEvaluation conditional(*this);
1406  
1407    if (nullCheck) {
1408      conditional.begin(*this);
1409  
1410      nullCheckBB = Builder.GetInsertBlock();
1411      llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1412      contBB = createBasicBlock("new.cont");
1413  
1414      llvm::Value *isNull =
1415        Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1416      Builder.CreateCondBr(isNull, contBB, notNullBB);
1417      EmitBlock(notNullBB);
1418    }
1419  
1420    // If there's an operator delete, enter a cleanup to call it if an
1421    // exception is thrown.
1422    EHScopeStack::stable_iterator operatorDeleteCleanup;
1423    llvm::Instruction *cleanupDominator = nullptr;
1424    if (E->getOperatorDelete() &&
1425        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1426      EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1427      operatorDeleteCleanup = EHStack.stable_begin();
1428      cleanupDominator = Builder.CreateUnreachable();
1429    }
1430  
1431    assert((allocSize == allocSizeWithoutCookie) ==
1432           CalculateCookiePadding(*this, E).isZero());
1433    if (allocSize != allocSizeWithoutCookie) {
1434      assert(E->isArray());
1435      allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1436                                                         numElements,
1437                                                         E, allocType);
1438    }
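        // (The cookie is the ABI-reserved header in front of the array
        // elements, typically holding the element count; e.g. under the
        // Itanium C++ ABI, 'new S[n]' for S with a non-trivial destructor
        // stores n ahead of the elements so 'delete[]' can recover it.)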
1439  
1440    llvm::Type *elementTy = ConvertTypeForMem(allocType);
1441    Address result = Builder.CreateElementBitCast(allocation, elementTy);
1442  
1443    // Pass the pointer through invariant.group.barrier to avoid propagating
1444    // vptr information that may be embedded in the previous type.
1445    if (CGM.getCodeGenOpts().StrictVTablePointers &&
1446        CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1447        allocator->isReservedGlobalPlacementOperator())
1448      result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()),
1449                       result.getAlignment());
1450  
1451    EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1452                       allocSizeWithoutCookie);
1453    if (E->isArray()) {
1454    // 'result' is a pointer to the base element type.  If we're
1455    // allocating an array of arrays, we'll need to cast back to the
1456    // array pointer type.
1457      llvm::Type *resultType = ConvertTypeForMem(E->getType());
1458      if (result.getType() != resultType)
1459        result = Builder.CreateBitCast(result, resultType);
1460    }
1461  
1462    // Deactivate the 'operator delete' cleanup if we finished
1463    // initialization.
1464    if (operatorDeleteCleanup.isValid()) {
1465      DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1466      cleanupDominator->eraseFromParent();
1467    }
1468  
1469    llvm::Value *resultPtr = result.getPointer();
1470    if (nullCheck) {
1471      conditional.end(*this);
1472  
1473      llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1474      EmitBlock(contBB);
1475  
1476      llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1477      PHI->addIncoming(resultPtr, notNullBB);
1478      PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1479                       nullCheckBB);
1480  
1481      resultPtr = PHI;
1482    }
1483  
1484    return resultPtr;
1485  }
1486  
1487  void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1488                                       llvm::Value *Ptr,
1489                                       QualType DeleteTy) {
1490    assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1491  
1492    const FunctionProtoType *DeleteFTy =
1493      DeleteFD->getType()->getAs<FunctionProtoType>();
1494  
1495    CallArgList DeleteArgs;
1496  
1497    // Check if we need to pass the size to the delete operator.
1498    llvm::Value *Size = nullptr;
1499    QualType SizeTy;
1500    if (DeleteFTy->getNumParams() == 2) {
1501      SizeTy = DeleteFTy->getParamType(1);
1502      CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1503      Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1504                                    DeleteTypeSize.getQuantity());
1505    }
1506  
1507    QualType ArgTy = DeleteFTy->getParamType(0);
1508    llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1509    DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1510  
1511    if (Size)
1512      DeleteArgs.add(RValue::get(Size), SizeTy);
1513  
1514    // Emit the call to delete.
1515    EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1516  }
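      // Depending on the declared signature, this emits one of (sketch):
      //
      //   operator delete(ptr);                    // void (void*)
      //   operator delete(ptr, sizeof(DeleteTy));  // sized deallocation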
1517  
1518  namespace {
1519    /// Calls the given 'operator delete' on a single object.
1520    struct CallObjectDelete final : EHScopeStack::Cleanup {
1521      llvm::Value *Ptr;
1522      const FunctionDecl *OperatorDelete;
1523      QualType ElementType;
1524  
1525      CallObjectDelete(llvm::Value *Ptr,
1526                       const FunctionDecl *OperatorDelete,
1527                       QualType ElementType)
1528        : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1529  
1530      void Emit(CodeGenFunction &CGF, Flags flags) override {
1531        CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1532      }
1533    };
1534  }
1535  
1536  void
1537  CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1538                                               llvm::Value *CompletePtr,
1539                                               QualType ElementType) {
1540    EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1541                                          OperatorDelete, ElementType);
1542  }
1543  
1544  /// Emit the code for deleting a single object.
1545  static void EmitObjectDelete(CodeGenFunction &CGF,
1546                               const CXXDeleteExpr *DE,
1547                               Address Ptr,
1548                               QualType ElementType) {
1549    // Find the destructor for the type, if applicable.  If the
1550    // destructor is virtual, we'll just emit the vcall and return.
1551    const CXXDestructorDecl *Dtor = nullptr;
1552    if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1553      CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1554      if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1555        Dtor = RD->getDestructor();
1556  
1557        if (Dtor->isVirtual()) {
1558          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1559                                                      Dtor);
1560          return;
1561        }
1562      }
1563    }
1564  
1565    // Make sure that we call delete even if the dtor throws.
1566    // This doesn't have to be a conditional cleanup because we're going
1567    // to pop it off in a second.
1568    const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1569    CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1570                                              Ptr.getPointer(),
1571                                              OperatorDelete, ElementType);
1572  
1573    if (Dtor)
1574      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1575                                /*ForVirtualBase=*/false,
1576                                /*Delegating=*/false,
1577                                Ptr);
1578    else if (auto Lifetime = ElementType.getObjCLifetime()) {
1579      switch (Lifetime) {
1580      case Qualifiers::OCL_None:
1581      case Qualifiers::OCL_ExplicitNone:
1582      case Qualifiers::OCL_Autoreleasing:
1583        break;
1584  
1585      case Qualifiers::OCL_Strong:
1586        CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1587        break;
1588  
1589      case Qualifiers::OCL_Weak:
1590        CGF.EmitARCDestroyWeak(Ptr);
1591        break;
1592      }
1593    }
1594  
1595    CGF.PopCleanupBlock();
1596  }
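      // For example, given 'struct B { virtual ~B(); }; delete pb;', the
      // virtual branch above emits a single virtual call (under the Itanium
      // C++ ABI, to the deleting destructor) and returns early; otherwise
      // the destructor call runs under the 'operator delete' cleanup that
      // was pushed beforehand.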
1597  
1598  namespace {
1599    /// Calls the given 'operator delete' on an array of objects.
1600    struct CallArrayDelete final : EHScopeStack::Cleanup {
1601      llvm::Value *Ptr;
1602      const FunctionDecl *OperatorDelete;
1603      llvm::Value *NumElements;
1604      QualType ElementType;
1605      CharUnits CookieSize;
1606  
1607      CallArrayDelete(llvm::Value *Ptr,
1608                      const FunctionDecl *OperatorDelete,
1609                      llvm::Value *NumElements,
1610                      QualType ElementType,
1611                      CharUnits CookieSize)
1612        : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1613          ElementType(ElementType), CookieSize(CookieSize) {}
1614  
1615      void Emit(CodeGenFunction &CGF, Flags flags) override {
1616        const FunctionProtoType *DeleteFTy =
1617          OperatorDelete->getType()->getAs<FunctionProtoType>();
1618        assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
1619  
1620        CallArgList Args;
1621  
1622        // Pass the pointer as the first argument.
1623        QualType VoidPtrTy = DeleteFTy->getParamType(0);
1624        llvm::Value *DeletePtr
1625          = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1626        Args.add(RValue::get(DeletePtr), VoidPtrTy);
1627  
1628        // Pass the original requested size as the second argument.
1629        if (DeleteFTy->getNumParams() == 2) {
1630          QualType size_t = DeleteFTy->getParamType(1);
1631          llvm::IntegerType *SizeTy
1632            = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1633  
1634          CharUnits ElementTypeSize =
1635            CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1636  
1637          // The size of an element, multiplied by the number of elements.
1638          llvm::Value *Size
1639            = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1640          if (NumElements)
1641            Size = CGF.Builder.CreateMul(Size, NumElements);
1642  
1643          // Plus the size of the cookie if applicable.
1644          if (!CookieSize.isZero()) {
1645            llvm::Value *CookieSizeV
1646              = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1647            Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1648          }
1649  
1650          Args.add(RValue::get(Size), size_t);
1651        }
1652  
1653        // Emit the call to delete.
1654        EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
1655      }
1656    };
1657  }
1658  
1659  /// Emit the code for deleting an array of objects.
1660  static void EmitArrayDelete(CodeGenFunction &CGF,
1661                              const CXXDeleteExpr *E,
1662                              Address deletedPtr,
1663                              QualType elementType) {
1664    llvm::Value *numElements = nullptr;
1665    llvm::Value *allocatedPtr = nullptr;
1666    CharUnits cookieSize;
1667    CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1668                                        numElements, allocatedPtr, cookieSize);
1669  
1670    assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1671  
1672    // Make sure that we call delete even if one of the dtors throws.
1673    const FunctionDecl *operatorDelete = E->getOperatorDelete();
1674    CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1675                                             allocatedPtr, operatorDelete,
1676                                             numElements, elementType,
1677                                             cookieSize);
1678  
1679    // Destroy the elements.
1680    if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1681      assert(numElements && "no element count for a type with a destructor!");
1682  
1683      CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1684      CharUnits elementAlign =
1685        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
1686  
1687      llvm::Value *arrayBegin = deletedPtr.getPointer();
1688      llvm::Value *arrayEnd =
1689        CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
1690  
1691      // Note that it is legal to allocate a zero-length array, and we
1692      // can never fold the check away because the length should always
1693      // come from a cookie.
1694      CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
1695                           CGF.getDestroyer(dtorKind),
1696                           /*checkZeroLength*/ true,
1697                           CGF.needsEHCleanup(dtorKind));
1698    }
1699  
1700    // Pop the cleanup block.
1701    CGF.PopCleanupBlock();
1702  }
1703  
1704  void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1705    const Expr *Arg = E->getArgument();
1706    Address Ptr = EmitPointerWithAlignment(Arg);
1707  
1708    // Null check the pointer.
1709    llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1710    llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1711  
1712    llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
1713  
1714    Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1715    EmitBlock(DeleteNotNull);
1716  
1717    // We might be deleting a pointer to array.  If so, GEP down to the
1718    // first non-array element.
1719    // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1720    QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1721    if (DeleteTy->isConstantArrayType()) {
1722      llvm::Value *Zero = Builder.getInt32(0);
1723      SmallVector<llvm::Value*,8> GEP;
1724  
1725      GEP.push_back(Zero); // point at the outermost array
1726  
1727      // For each layer of array type we're pointing at:
1728      while (const ConstantArrayType *Arr
1729               = getContext().getAsConstantArrayType(DeleteTy)) {
1730        // 1. Unpeel the array type.
1731        DeleteTy = Arr->getElementType();
1732  
1733        // 2. GEP to the first element of the array.
1734        GEP.push_back(Zero);
1735      }
1736  
1737      Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
1738                    Ptr.getAlignment());
1739    }
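        // e.g. for 'delete[] p' with 'A (*p)[3][7]', DeleteTy unpeels to 'A'
        // and the GEP above is {0, 0, 0}, yielding a pointer to the first A.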
1740  
1741    assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
1742  
1743    if (E->isArrayForm()) {
1744      EmitArrayDelete(*this, E, Ptr, DeleteTy);
1745    } else {
1746      EmitObjectDelete(*this, E, Ptr, DeleteTy);
1747    }
1748  
1749    EmitBlock(DeleteEnd);
1750  }
1751  
1752  static bool isGLValueFromPointerDeref(const Expr *E) {
1753    E = E->IgnoreParens();
1754  
1755    if (const auto *CE = dyn_cast<CastExpr>(E)) {
1756      if (!CE->getSubExpr()->isGLValue())
1757        return false;
1758      return isGLValueFromPointerDeref(CE->getSubExpr());
1759    }
1760  
1761    if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
1762      return isGLValueFromPointerDeref(OVE->getSourceExpr());
1763  
1764    if (const auto *BO = dyn_cast<BinaryOperator>(E))
1765      if (BO->getOpcode() == BO_Comma)
1766        return isGLValueFromPointerDeref(BO->getRHS());
1767  
1768    if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
1769      return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
1770             isGLValueFromPointerDeref(ACO->getFalseExpr());
1771  
1772    // C++11 [expr.sub]p1:
1773    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
1774    if (isa<ArraySubscriptExpr>(E))
1775      return true;
1776  
1777    if (const auto *UO = dyn_cast<UnaryOperator>(E))
1778      if (UO->getOpcode() == UO_Deref)
1779        return true;
1780  
1781    return false;
1782  }
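      // So, for example, 'typeid(*p)', 'typeid(p[i])', 'typeid((0, *p))' and
      // 'typeid(b ? *p : *q)' are all treated as pointer derefs and may get
      // the null check in EmitTypeidFromVTable below; 'typeid(obj)' on a
      // plain lvalue is not.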
1783  
1784  static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
1785                                           llvm::Type *StdTypeInfoPtrTy) {
1786    // Get the vtable pointer.
1787    Address ThisPtr = CGF.EmitLValue(E).getAddress();
1788  
1789    // C++ [expr.typeid]p2:
1790    //   If the glvalue expression is obtained by applying the unary * operator to
1791    //   a pointer and the pointer is a null pointer value, the typeid expression
1792    //   throws the std::bad_typeid exception.
1793    //
1794    // However, this paragraph's intent is not clear.  We choose a very generous
1795    // interpretation that compels us to consider comma operators, conditional
1796    // operators, parentheses, and other such constructs.
1797    QualType SrcRecordTy = E->getType();
1798    if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
1799            isGLValueFromPointerDeref(E), SrcRecordTy)) {
1800      llvm::BasicBlock *BadTypeidBlock =
1801          CGF.createBasicBlock("typeid.bad_typeid");
1802      llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
1803  
1804      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
1805      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1806  
1807      CGF.EmitBlock(BadTypeidBlock);
1808      CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
1809      CGF.EmitBlock(EndBlock);
1810    }
1811  
1812    return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
1813                                          StdTypeInfoPtrTy);
1814  }
1815  
1816  llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1817    llvm::Type *StdTypeInfoPtrTy =
1818      ConvertType(E->getType())->getPointerTo();
1819  
1820    if (E->isTypeOperand()) {
1821      llvm::Constant *TypeInfo =
1822          CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
1823      return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1824    }
1825  
1826    // C++ [expr.typeid]p2:
1827    //   When typeid is applied to a glvalue expression whose type is a
1828    //   polymorphic class type, the result refers to a std::type_info object
1829    //   representing the type of the most derived object (that is, the dynamic
1830    //   type) to which the glvalue refers.
1831    if (E->isPotentiallyEvaluated())
1832      return EmitTypeidFromVTable(*this, E->getExprOperand(),
1833                                  StdTypeInfoPtrTy);
1834  
1835    QualType OperandTy = E->getExprOperand()->getType();
1836    return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1837                                 StdTypeInfoPtrTy);
1838  }
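      // e.g. 'typeid(int)' and 'typeid(e)' on a non-polymorphic operand
      // resolve to a static RTTI descriptor above, while
      // 'typeid(*polymorphic_ptr)' takes the vtable path through
      // EmitTypeidFromVTable.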
1839  
1840  static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1841                                            QualType DestTy) {
1842    llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1843    if (DestTy->isPointerType())
1844      return llvm::Constant::getNullValue(DestLTy);
1845  
1846    /// C++ [expr.dynamic.cast]p9:
1847    ///   A failed cast to reference type throws std::bad_cast
1848    if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
1849      return nullptr;
1850  
1851    CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1852    return llvm::UndefValue::get(DestLTy);
1853  }
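      // (Reached when the frontend proves the cast always fails: the pointer
      // form folds to a null constant, while the reference form must still
      // throw at runtime.)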
1854  
1855  llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
1856                                                const CXXDynamicCastExpr *DCE) {
1857    CGM.EmitExplicitCastExprType(DCE, this);
1858    QualType DestTy = DCE->getTypeAsWritten();
1859  
1860    if (DCE->isAlwaysNull())
1861      if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
1862        return T;
1863  
1864    QualType SrcTy = DCE->getSubExpr()->getType();
1865  
1866    // C++ [expr.dynamic.cast]p7:
1867    //   If T is "pointer to cv void," then the result is a pointer to the most
1868    //   derived object pointed to by v.
1869    const PointerType *DestPTy = DestTy->getAs<PointerType>();
1870  
1871    bool isDynamicCastToVoid;
1872    QualType SrcRecordTy;
1873    QualType DestRecordTy;
1874    if (DestPTy) {
1875      isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
1876      SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1877      DestRecordTy = DestPTy->getPointeeType();
1878    } else {
1879      isDynamicCastToVoid = false;
1880      SrcRecordTy = SrcTy;
1881      DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1882    }
1883  
1884    assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1885  
1886    // C++ [expr.dynamic.cast]p4:
1887    //   If the value of v is a null pointer value in the pointer case, the result
1888    //   is the null pointer value of type T.
1889    bool ShouldNullCheckSrcValue =
1890        CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
1891                                                           SrcRecordTy);
1892  
1893    llvm::BasicBlock *CastNull = nullptr;
1894    llvm::BasicBlock *CastNotNull = nullptr;
1895    llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1896  
1897    if (ShouldNullCheckSrcValue) {
1898      CastNull = createBasicBlock("dynamic_cast.null");
1899      CastNotNull = createBasicBlock("dynamic_cast.notnull");
1900  
1901      llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
1902      Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1903      EmitBlock(CastNotNull);
1904    }
1905  
1906    llvm::Value *Value;
1907    if (isDynamicCastToVoid) {
1908      Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
1909                                                    DestTy);
1910    } else {
1911      assert(DestRecordTy->isRecordType() &&
1912             "destination type must be a record type!");
1913      Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
1914                                                  DestTy, DestRecordTy, CastEnd);
1915      CastNotNull = Builder.GetInsertBlock();
1916    }
1917  
1918    if (ShouldNullCheckSrcValue) {
1919      EmitBranch(CastEnd);
1920  
1921      EmitBlock(CastNull);
1922      EmitBranch(CastEnd);
1923    }
1924  
1925    EmitBlock(CastEnd);
1926  
1927    if (ShouldNullCheckSrcValue) {
1928      llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1929      PHI->addIncoming(Value, CastNotNull);
1930      PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1931  
1932      Value = PHI;
1933    }
1934  
1935    return Value;
1936  }
1937  
1938  void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1939    RunCleanupsScope Scope(*this);
1940    LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
1941  
1942    CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1943    for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1944                                                 e = E->capture_init_end();
1945         i != e; ++i, ++CurField) {
1946      // Emit initialization
1947      LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1948      if (CurField->hasCapturedVLAType()) {
1949        auto VAT = CurField->getCapturedVLAType();
1950        EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
1951      } else {
1952        ArrayRef<VarDecl *> ArrayIndexes;
1953        if (CurField->getType()->isArrayType())
1954          ArrayIndexes = E->getCaptureInitIndexVars(i);
1955        EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1956      }
1957    }
1958  }
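      // For example, for 'auto f = [n, &m] { ... };' this walks the closure
      // class's two implicit fields, emitting 'n' as a copy and 'm' as a
      // reference binding, much as a constructor's member initializers would.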
1959