//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}
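
// For example, a function declared 'void f(void) __attribute__((stdcall))'
// reaches here as CC_X86StdCall and is lowered to llvm::CallingConv::X86_StdCall
// ('x86_stdcallcc' in textual IR); any convention without a case, including
// CC_C, falls back to the default C convention.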

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
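
// For instance, a function declared as returning 'const int' is arranged
// here as simply returning 'int'; the top-level qualifier has no effect on
// the calling convention or the lowered IR type.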

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 ArrayRef<CanQualType>(),
                                 FTNP->getExtInfo(),
                                 RequiredArgs(0));
}
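
// Treating the unprototyped type as variadic with zero required arguments
// keeps any call through it well-formed: a call through 'int (*)()' may
// pass no arguments or several, and each one is lowered under the
// variadic-argument rules.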

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}
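
// Variadicness matters here because some ABIs give instance methods a
// dedicated default convention (e.g. thiscall under the Microsoft x86 ABI),
// and callee-cleanup conventions like thiscall cannot be used for variadic
// methods, so the default depends on isVariadic.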

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}
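
// Note that 'required' is computed before the formal parameters are
// appended: at that point the prefix holds 'this' plus whatever implicit
// arguments BuildConstructorSignature added (e.g. a VTT parameter in some
// C++ ABIs), all of which are always passed.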

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "dtor with variadic prototype");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(),
                                   ArrayRef<CanQualType>(),
                                   noProto->getExtInfo(),
                                   RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}
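
// The first two argument slots mirror the implicit 'self' and '_cmd'
// (selector) parameters that every Objective-C method receives; for a
// variadic method only the formal parameters are required, and trailing
// arguments are lowered under the variadic rules.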

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFreeFunctionCall(fnType->getResultType(), args,
                                 fnType->getExtInfo(), required);
}
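
// For example, a call to 'int printf(const char *, ...)' arranges exactly
// one required argument (the format string); however many arguments appear
// at the call site, everything after the first is lowered under the
// variadic rules.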

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
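
// The single allocation lays out the CGFunctionInfo header followed by
// NumArgs + 1 trailing ArgInfo slots: slot 0 carries the result type and
// slots 1..NumArgs the parameter types, which is exactly what
// getArgsBuffer() indexes.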

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases: after flattening,
      // all of the fields are the same, so we use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
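
// As an illustration, 'struct { float x; float y[2]; }' expands to three
// floats (the array contributes one element per index), '_Complex double'
// expands to two doubles, and anything else falls through to the final
// case and is passed through ConvertType unchanged.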

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases: after flattening,
      // all of the fields are the same, so we use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
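
// For example, with SrcSTy = '{ { i16, i16 }, i32 }' and DstSize = 4, the
// first GEP dives into the inner '{ i16, i16 }' (its 4 bytes cover DstSize),
// and recursion stops there: a lone i16 would be smaller than both DstSize
// and the inner struct, so we return the pointer to '{ i16, i16 }'.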

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
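
// E.g. coercing an 'i8*' to 'i32' on a 64-bit target emits a ptrtoint to
// the pointer-sized i64 followed by a trunc to i32; the pointer-to-pointer
// case above stays a plain bitcast and never round-trips through integers.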

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
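
// These two predicates drive the choice of message-send entry point on
// targets where floating-point results come back in special registers;
// e.g. Darwin x86 uses objc_msgSend_fpret (and objc_msgSend_fp2ret for
// '_Complex long double') rather than the plain objc_msgSend.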

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding type to ensure proper alignment.
      if (llvm::Type *PaddingType = argAI.getPaddingType())
        argTypes.push_back(PaddingType);
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}
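
// The resulting IR signature can differ noticeably from the source type:
// an indirect ('sret') result becomes a leading pointer parameter with a
// void return, expanded aggregates contribute one IR parameter per leaf
// field, and coerced FCAs are flattened into their elements.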

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::Attributes FuncAttrs;
  llvm::Attributes RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadNone;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadOnly;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::Attributes SRETAttrs = llvm::Attribute::StructRet;
    if (RetAI.getInReg())
      SRETAttrs |= llvm::Attribute::InReg;
    PAL.push_back(llvm::AttributeWithIndex::get(Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::Attributes Attrs;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs |= llvm::Attribute::InReg;

      // FIXME: handle sseregparm someday...

      // Increment Index if there is padding.
      Index += (AI.getPaddingType() != 0);

      if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs != llvm::Attribute::None)
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeWithIndex::get(Index + I, Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attrs |= llvm::Attribute::ByVal;

      Attrs |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
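
// Under the K&R default-promotion rules a parameter declared 'float'
// arrives as 'double' and a 'short' arrives as 'int'; the trunc/fptrunc
// emitted here recovers the declared type before the parameter variable
// is initialized.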

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Skip the dummy padding argument.
      if (ArgI.getPaddingType())
        ++AI;

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
1227       llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1228       if (STy && STy->getNumElements() > 1) {
1229         uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
1230         llvm::Type *DstTy =
1231           cast<llvm::PointerType>(Ptr->getType())->getElementType();
1232         uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
1233 
1234         if (SrcSize <= DstSize) {
1235           Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1236 
1237           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1238             assert(AI != Fn->arg_end() && "Argument mismatch!");
1239             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1240             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1241             Builder.CreateStore(AI++, EltPtr);
1242           }
1243         } else {
1244           llvm::AllocaInst *TempAlloca =
1245             CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1246           TempAlloca->setAlignment(AlignmentToUse);
1247           llvm::Value *TempV = TempAlloca;
1248 
1249           for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1250             assert(AI != Fn->arg_end() && "Argument mismatch!");
1251             AI->setName(Arg->getName() + ".coerce" + Twine(i));
1252             llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1253             Builder.CreateStore(AI++, EltPtr);
1254           }
1255 
1256           Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1257         }
1258       } else {
1259         // Simple case, just do a coerced store of the argument into the alloca.
1260         assert(AI != Fn->arg_end() && "Argument mismatch!");
1261         AI->setName(Arg->getName() + ".coerce");
1262         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1263       }
1264 
1265 
1266       // Match to what EmitParmDecl is expecting for this type.
1267       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1268         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1269         if (isPromoted)
1270           V = emitArgumentDemotion(*this, Arg, V);
1271       }
1272       EmitParmDecl(*Arg, V, ArgNo);
1273       continue;  // Skip ++AI increment, already done.
1274     }
1275 
1276     case ABIArgInfo::Expand: {
1277       // If this structure was expanded into multiple arguments then
1278       // we need to create a temporary and reconstruct it from the
1279       // arguments.
1280       llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1281       CharUnits Align = getContext().getDeclAlign(Arg);
1282       Alloca->setAlignment(Align.getQuantity());
1283       LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1284       llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1285       EmitParmDecl(*Arg, Alloca, ArgNo);
1286 
1287       // Name the arguments used in expansion and increment AI.
1288       unsigned Index = 0;
1289       for (; AI != End; ++AI, ++Index)
1290         AI->setName(Arg->getName() + "." + Twine(Index));
1291       continue;
1292     }
1293 
1294     case ABIArgInfo::Ignore:
1295       // Initialize the local variable appropriately.
1296       if (hasAggregateLLVMType(Ty))
1297         EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1298       else
1299         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1300                      ArgNo);
1301 
1302       // Skip increment, no matching LLVM parameter.
1303       continue;
1304     }
1305 
1306     ++AI;
1307   }
1308   assert(AI == Fn->arg_end() && "Argument mismatch!");
1309 }
1310 
1311 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1312   while (insn->use_empty()) {
1313     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1314     if (!bitcast) return;
1315 
1316     // This is "safe" because we would have used a ConstantExpr otherwise.
1317     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1318     bitcast->eraseFromParent();
1319   }
1320 }
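// For example (illustrative): given the dead chain
//   %1 = bitcast i8* %0 to %T*
//   %2 = bitcast %T* %1 to i8*   ; no uses
// starting from %2 this erases %2 and then %1, stopping at %0, which is
// either still used or not a bitcast.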
1321 
1322 /// Try to emit a fused autorelease of a return result.
1323 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1324                                                     llvm::Value *result) {
1325   // We must immediately follow the cast.
1326   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1327   if (BB->empty()) return 0;
1328   if (&BB->back() != result) return 0;
1329 
1330   llvm::Type *resultType = result->getType();
1331 
1332   // result is in a BasicBlock and is therefore an Instruction.
1333   llvm::Instruction *generator = cast<llvm::Instruction>(result);
1334 
1335   SmallVector<llvm::Instruction*,4> insnsToKill;
1336 
1337   // Look for:
1338   //  %generator = bitcast %type1* %generator2 to %type2*
1339   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1340     // We would have emitted this as a constant if the operand weren't
1341     // an Instruction.
1342     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1343 
1344     // Require the generator to be immediately followed by the cast.
1345     if (generator->getNextNode() != bitcast)
1346       return 0;
1347 
1348     insnsToKill.push_back(bitcast);
1349   }
1350 
1351   // Look for:
1352   //   %generator = call i8* @objc_retain(i8* %originalResult)
1353   // or
1354   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1355   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1356   if (!call) return 0;
1357 
1358   bool doRetainAutorelease;
1359 
1360   if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1361     doRetainAutorelease = true;
1362   } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1363                                           .objc_retainAutoreleasedReturnValue) {
1364     doRetainAutorelease = false;
1365 
1366     // If we emitted an assembly marker for this call (and the
1367     // ARCEntrypoints field should have been set if so), go looking
1368     // for that call.  If we can't find it, we can't do this
1369     // optimization.  But it should always be the immediately previous
1370     // instruction, unless we needed bitcasts around the call.
1371     if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1372       llvm::Instruction *prev = call->getPrevNode();
1373       assert(prev);
1374       if (isa<llvm::BitCastInst>(prev)) {
1375         prev = prev->getPrevNode();
1376         assert(prev);
1377       }
1378       assert(isa<llvm::CallInst>(prev));
1379       assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1380                CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1381       insnsToKill.push_back(prev);
1382     }
1383   } else {
1384     return 0;
1385   }
1386 
1387   result = call->getArgOperand(0);
1388   insnsToKill.push_back(call);
1389 
1390   // Keep killing bitcasts, for sanity.  Note that we no longer care
1391   // about precise ordering as long as there's exactly one use.
1392   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1393     if (!bitcast->hasOneUse()) break;
1394     insnsToKill.push_back(bitcast);
1395     result = bitcast->getOperand(0);
1396   }
1397 
1398   // Delete all the unnecessary instructions, from latest to earliest.
1399   for (SmallVectorImpl<llvm::Instruction*>::iterator
1400          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1401     (*i)->eraseFromParent();
1402 
1403   // Do the fused retain/autorelease if we were asked to.
1404   if (doRetainAutorelease)
1405     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1406 
1407   // Cast back to the result type.
1408   return CGF.Builder.CreateBitCast(result, resultType);
1409 }
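// For example (illustrative): a return sequence ending in
//   %1 = call i8* @objc_retain(i8* %0)
// has the retain and the pending autorelease replaced by a single fused
//   call i8* @objc_retainAutoreleaseReturnValue(i8* %0)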
1410 
1411 /// If this is a +1 of the value of an immutable 'self', remove it.
1412 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1413                                           llvm::Value *result) {
1414   // This is only applicable to a method with an immutable 'self'.
1415   const ObjCMethodDecl *method =
1416     dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1417   if (!method) return 0;
1418   const VarDecl *self = method->getSelfDecl();
1419   if (!self->getType().isConstQualified()) return 0;
1420 
1421   // Look for a retain call.
1422   llvm::CallInst *retainCall =
1423     dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1424   if (!retainCall ||
1425       retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1426     return 0;
1427 
1428   // Look for an ordinary load of 'self'.
1429   llvm::Value *retainedValue = retainCall->getArgOperand(0);
1430   llvm::LoadInst *load =
1431     dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1432   if (!load || load->isAtomic() || load->isVolatile() ||
1433       load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1434     return 0;
1435 
1436   // Okay!  Burn it all down.  This relies for correctness on the
1437   // assumption that the retain is emitted as part of the return and
1438   // that thereafter everything is used "linearly".
1439   llvm::Type *resultType = result->getType();
1440   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1441   assert(retainCall->use_empty());
1442   retainCall->eraseFromParent();
1443   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1444 
1445   return CGF.Builder.CreateBitCast(load, resultType);
1446 }
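// For example (illustrative): in a method like
//   - (id)foo { return self; }
// where 'self' is immutable, the retain of the loaded 'self' (and any
// bitcasts around it) are erased and the plain load is returned instead.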
1447 
1448 /// Emit an ARC autorelease of the result of a function.
1449 ///
1450 /// \return the value to actually return from the function
1451 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1452                                             llvm::Value *result) {
1453   // If we're returning 'self', kill the initial retain.  This is a
1454   // heuristic attempt to "encourage correctness" in the really unfortunate
1455   // case where we have a return of self during a dealloc and we desperately
1456   // need to avoid the possible autorelease.
1457   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1458     return self;
1459 
1460   // At -O0, try to emit a fused retain/autorelease.
1461   if (CGF.shouldUseFusedARCCalls())
1462     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1463       return fused;
1464 
1465   return CGF.EmitARCAutoreleaseReturnValue(result);
1466 }
1467 
1468 /// Heuristically search for a dominating store to the return-value slot.
1469 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1470   // If there are multiple uses of the return-value slot, just check
1471   // for something immediately preceding the IP.  Sometimes this can
1472   // happen with how we generate implicit-returns; it can also happen
1473   // with noreturn cleanups.
1474   if (!CGF.ReturnValue->hasOneUse()) {
1475     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1476     if (IP->empty()) return 0;
1477     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1478     if (!store) return 0;
1479     if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1480     assert(!store->isAtomic() && !store->isVolatile()); // see below
1481     return store;
1482   }
1483 
1484   llvm::StoreInst *store =
1485     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1486   if (!store) return 0;
1487 
1488   // These aren't actually possible for non-coerced returns, and we
1489   // only care about non-coerced returns on this code path.
1490   assert(!store->isAtomic() && !store->isVolatile());
1491 
1492   // Now do a quick-and-dirty dominance check: just walk up the
1493   // single-predecessor chain from the current insertion point.
1494   llvm::BasicBlock *StoreBB = store->getParent();
1495   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1496   while (IP != StoreBB) {
1497     if (!(IP = IP->getSinglePredecessor()))
1498       return 0;
1499   }
1500 
1501   // Okay, the store's basic block dominates the insertion point; we
1502   // can do our thing.
1503   return store;
1504 }
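// For example (illustrative): the common epilogue pattern
//   store i32 %x, i32* %retval
//   %0 = load i32* %retval
//   ret i32 %0
// collapses to 'ret i32 %x' once the caller below finds the store and
// erases it.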
1505 
1506 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1507   // Functions with no result always return void.
1508   if (ReturnValue == 0) {
1509     Builder.CreateRetVoid();
1510     return;
1511   }
1512 
1513   llvm::DebugLoc RetDbgLoc;
1514   llvm::Value *RV = 0;
1515   QualType RetTy = FI.getReturnType();
1516   const ABIArgInfo &RetAI = FI.getReturnInfo();
1517 
1518   switch (RetAI.getKind()) {
1519   case ABIArgInfo::Indirect: {
1520     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1521     if (RetTy->isAnyComplexType()) {
1522       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1523       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1524     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1525       // Do nothing; aggregates get evaluated directly into the destination.
1526     } else {
1527       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1528                         false, Alignment, RetTy);
1529     }
1530     break;
1531   }
1532 
1533   case ABIArgInfo::Extend:
1534   case ABIArgInfo::Direct:
1535     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1536         RetAI.getDirectOffset() == 0) {
1537       // The internal return value temp will always have pointer-to-return-type
1538       // type; just do a load.
1539 
1540       // If there is a dominating store to ReturnValue, we can elide
1541       // the load, zap the store, and usually zap the alloca.
1542       if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1543         // Get the stored value and nuke the now-dead store.
1544         RetDbgLoc = SI->getDebugLoc();
1545         RV = SI->getValueOperand();
1546         SI->eraseFromParent();
1547 
1548         // If that was the only use of the return value, nuke it as well now.
1549         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1550           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1551           ReturnValue = 0;
1552         }
1553 
1554       // Otherwise, we have to do a simple load.
1555       } else {
1556         RV = Builder.CreateLoad(ReturnValue);
1557       }
1558     } else {
1559       llvm::Value *V = ReturnValue;
1560       // If the value is offset in memory, apply the offset now.
1561       if (unsigned Offs = RetAI.getDirectOffset()) {
1562         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1563         V = Builder.CreateConstGEP1_32(V, Offs);
1564         V = Builder.CreateBitCast(V,
1565                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1566       }
1567 
1568       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1569     }
1570 
1571     // In ARC, end functions that return a retainable type with a call
1572     // to objc_autoreleaseReturnValue.
1573     if (AutoreleaseResult) {
1574       assert(getLangOpts().ObjCAutoRefCount &&
1575              !FI.isReturnsRetained() &&
1576              RetTy->isObjCRetainableType());
1577       RV = emitAutoreleaseOfResult(*this, RV);
1578     }
1579 
1580     break;
1581 
1582   case ABIArgInfo::Ignore:
1583     break;
1584 
1585   case ABIArgInfo::Expand:
1586     llvm_unreachable("Invalid ABI kind for return argument");
1587   }
1588 
1589   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1590   if (!RetDbgLoc.isUnknown())
1591     Ret->setDebugLoc(RetDbgLoc);
1592 }
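// (Illustrative: in the Indirect case above, a function returning a large
// struct is typically lowered as 'void f(%struct.S* sret %agg.result, ...)';
// the result is stored through that hidden first argument and the IR
// function itself returns void.)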
1593 
1594 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1595                                           const VarDecl *param) {
1596   // StartFunction converted the ABI-lowered parameter(s) into a
1597   // local alloca.  We need to turn that into an r-value suitable
1598   // for EmitCall.
1599   llvm::Value *local = GetAddrOfLocalVar(param);
1600 
1601   QualType type = param->getType();
1602 
1603   // For the most part, we just need to load the alloca, except:
1604   // 1) aggregate r-values are actually pointers to temporaries, and
1605   // 2) references to aggregates are pointers directly to the aggregate.
1606   // I don't know why references to non-aggregates are different here.
1607   if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1608     if (hasAggregateLLVMType(ref->getPointeeType()))
1609       return args.add(RValue::getAggregate(local), type);
1610 
1611     // Locals which are references to scalars are represented
1612     // with allocas holding the pointer.
1613     return args.add(RValue::get(Builder.CreateLoad(local)), type);
1614   }
1615 
1616   if (type->isAnyComplexType()) {
1617     ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1618     return args.add(RValue::getComplex(complex), type);
1619   }
1620 
1621   if (hasAggregateLLVMType(type))
1622     return args.add(RValue::getAggregate(local), type);
1623 
1624   unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1625   llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1626   return args.add(RValue::get(value), type);
1627 }
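// (Illustrative: when forwarding 'void f(int &r)', the alloca for 'r' holds
// an 'int *'; the load above recovers that pointer so it can be passed
// straight through as a scalar r-value.)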
1628 
1629 static bool isProvablyNull(llvm::Value *addr) {
1630   return isa<llvm::ConstantPointerNull>(addr);
1631 }
1632 
1633 static bool isProvablyNonNull(llvm::Value *addr) {
1634   return isa<llvm::AllocaInst>(addr);
1635 }
1636 
1637 /// Emit the actual writing-back of a writeback.
1638 static void emitWriteback(CodeGenFunction &CGF,
1639                           const CallArgList::Writeback &writeback) {
1640   llvm::Value *srcAddr = writeback.Address;
1641   assert(!isProvablyNull(srcAddr) &&
1642          "shouldn't have writeback for provably null argument");
1643 
1644   llvm::BasicBlock *contBB = 0;
1645 
1646   // If the argument wasn't provably non-null, we need to null check
1647   // before doing the store.
1648   bool provablyNonNull = isProvablyNonNull(srcAddr);
1649   if (!provablyNonNull) {
1650     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1651     contBB = CGF.createBasicBlock("icr.done");
1652 
1653     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1654     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1655     CGF.EmitBlock(writebackBB);
1656   }
1657 
1658   // Load the value to writeback.
1659   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1660 
1661   // Cast it back, in case we're writing an id to a Foo* or something.
1662   value = CGF.Builder.CreateBitCast(value,
1663                cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1664                             "icr.writeback-cast");
1665 
1666   // Perform the writeback.
1667   QualType srcAddrType = writeback.AddressType;
1668   CGF.EmitStoreThroughLValue(RValue::get(value),
1669                              CGF.MakeAddrLValue(srcAddr, srcAddrType));
1670 
1671   // Jump to the continuation block.
1672   if (!provablyNonNull)
1673     CGF.EmitBlock(contBB);
1674 }
1675 
1676 static void emitWritebacks(CodeGenFunction &CGF,
1677                            const CallArgList &args) {
1678   for (CallArgList::writeback_iterator
1679          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1680     emitWriteback(CGF, *i);
1681 }
1682 
1683 /// Emit an argument that's being passed call-by-writeback.  That is,
1684 /// we are passing the address of an __autoreleasing temporary.
1685 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1686                              const ObjCIndirectCopyRestoreExpr *CRE) {
1687   llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1688 
1689   // The dest and src types don't necessarily match in LLVM terms
1690   // because of the crazy ObjC compatibility rules.
1691 
1692   llvm::PointerType *destType =
1693     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1694 
1695   // If the address is a constant null, just pass the appropriate null.
1696   if (isProvablyNull(srcAddr)) {
1697     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1698              CRE->getType());
1699     return;
1700   }
1701 
1702   QualType srcAddrType =
1703     CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1704 
1705   // Create the temporary.
1706   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1707                                            "icr.temp");
1708 
1709   // Zero-initialize it if we're not doing a copy-initialization.
1710   bool shouldCopy = CRE->shouldCopy();
1711   if (!shouldCopy) {
1712     llvm::Value *null =
1713       llvm::ConstantPointerNull::get(
1714         cast<llvm::PointerType>(destType->getElementType()));
1715     CGF.Builder.CreateStore(null, temp);
1716   }
1717 
1718   llvm::BasicBlock *contBB = 0;
1719 
1720   // If the address is *not* known to be non-null, we need to switch.
1721   llvm::Value *finalArgument;
1722 
1723   bool provablyNonNull = isProvablyNonNull(srcAddr);
1724   if (provablyNonNull) {
1725     finalArgument = temp;
1726   } else {
1727     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1728 
1729     finalArgument = CGF.Builder.CreateSelect(isNull,
1730                                    llvm::ConstantPointerNull::get(destType),
1731                                              temp, "icr.argument");
1732 
1733     // If we need to copy, then the load has to be conditional, which
1734     // means we need control flow.
1735     if (shouldCopy) {
1736       contBB = CGF.createBasicBlock("icr.cont");
1737       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1738       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1739       CGF.EmitBlock(copyBB);
1740     }
1741   }
1742 
1743   // Perform a copy if necessary.
1744   if (shouldCopy) {
1745     LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1746     RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1747     assert(srcRV.isScalar());
1748 
1749     llvm::Value *src = srcRV.getScalarVal();
1750     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1751                                     "icr.cast");
1752 
1753     // Use an ordinary store, not a store-to-lvalue.
1754     CGF.Builder.CreateStore(src, temp);
1755   }
1756 
1757   // Finish the control flow if we needed it.
1758   if (shouldCopy && !provablyNonNull)
1759     CGF.EmitBlock(contBB);
1760 
1761   args.addWriteback(srcAddr, srcAddrType, temp);
1762   args.add(RValue::get(finalArgument), CRE->getType());
1763 }
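// For example (illustrative): for a call like
//   NSError *err;
//   [obj doSomething:&err];   // parameter is NSError *__autoreleasing *
// we pass the address of an "icr.temp" alloca instead of &err, null-checking
// the source address where needed; after the call, emitWriteback stores the
// temporary back into 'err'.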
1764 
1765 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1766                                   QualType type) {
1767   if (const ObjCIndirectCopyRestoreExpr *CRE
1768         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1769     assert(getContext().getLangOpts().ObjCAutoRefCount);
1770     assert(getContext().hasSameType(E->getType(), type));
1771     return emitWritebackArg(*this, args, CRE);
1772   }
1773 
1774   assert(type->isReferenceType() == E->isGLValue() &&
1775          "reference binding to unmaterialized r-value!");
1776 
1777   if (E->isGLValue()) {
1778     assert(E->getObjectKind() == OK_Ordinary);
1779     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1780                     type);
1781   }
1782 
1783   if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1784       isa<ImplicitCastExpr>(E) &&
1785       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1786     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1787     assert(L.isSimple());
1788     args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1789     return;
1790   }
1791 
1792   args.add(EmitAnyExprToTemp(E), type);
1793 }
1794 
1795 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1796 // optimizer it can aggressively ignore unwind edges.
1797 void
1798 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1799   if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1800       !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1801     Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1802                       CGM.getNoObjCARCExceptionsMetadata());
1803 }
1804 
1805 /// Emits a call or invoke instruction to the given function, depending
1806 /// on the current state of the EH stack.
1807 llvm::CallSite
1808 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1809                                   ArrayRef<llvm::Value *> Args,
1810                                   const Twine &Name) {
1811   llvm::BasicBlock *InvokeDest = getInvokeDest();
1812 
1813   llvm::Instruction *Inst;
1814   if (!InvokeDest)
1815     Inst = Builder.CreateCall(Callee, Args, Name);
1816   else {
1817     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1818     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1819     EmitBlock(ContBB);
1820   }
1821 
1822   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1823   // optimizer it can aggressively ignore unwind edges.
1824   if (CGM.getLangOpts().ObjCAutoRefCount)
1825     AddObjCARCExceptionMetadata(Inst);
1826 
1827   return Inst;
1828 }
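// (Illustrative: within an EH scope this emits
//   invoke void @g() to label %invoke.cont unwind label %lpad
// while outside any such scope it is a plain 'call void @g()'.)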
1829 
1830 llvm::CallSite
1831 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1832                                   const Twine &Name) {
1833   return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1834 }
1835 
1836 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1837                             llvm::FunctionType *FTy) {
1838   if (ArgNo < FTy->getNumParams())
1839     assert(Elt->getType() == FTy->getParamType(ArgNo));
1840   else
1841     assert(FTy->isVarArg());
1842   ++ArgNo;
1843 }
1844 
1845 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1846                                        SmallVector<llvm::Value*,16> &Args,
1847                                        llvm::FunctionType *IRFuncTy) {
1848   if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1849     unsigned NumElts = AT->getSize().getZExtValue();
1850     QualType EltTy = AT->getElementType();
1851     llvm::Value *Addr = RV.getAggregateAddr();
1852     for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1853       llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1854       LValue LV = MakeAddrLValue(EltAddr, EltTy);
1855       RValue EltRV;
1856       if (EltTy->isAnyComplexType())
1857         // FIXME: Volatile?
1858         EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1859       else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1860         EltRV = LV.asAggregateRValue();
1861       else
1862         EltRV = EmitLoadOfLValue(LV);
1863       ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1864     }
1865   } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1866     RecordDecl *RD = RT->getDecl();
1867     assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1868     LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1869 
1870     if (RD->isUnion()) {
1871       const FieldDecl *LargestFD = 0;
1872       CharUnits UnionSize = CharUnits::Zero();
1873 
1874       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1875            i != e; ++i) {
1876         const FieldDecl *FD = *i;
1877         assert(!FD->isBitField() &&
1878                "Cannot expand structure with bit-field members.");
1879         CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1880         if (UnionSize < FieldSize) {
1881           UnionSize = FieldSize;
1882           LargestFD = FD;
1883         }
1884       }
1885       if (LargestFD) {
1886         RValue FldRV = EmitRValueForField(LV, LargestFD);
1887         ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1888       }
1889     } else {
1890       for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1891            i != e; ++i) {
1892         FieldDecl *FD = *i;
1893 
1894         RValue FldRV = EmitRValueForField(LV, FD);
1895         ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1896       }
1897     }
1898   } else if (Ty->isAnyComplexType()) {
1899     ComplexPairTy CV = RV.getComplexVal();
1900     Args.push_back(CV.first);
1901     Args.push_back(CV.second);
1902   } else {
1903     assert(RV.isScalar() &&
1904            "Unexpected non-scalar rvalue during struct expansion.");
1905 
1906     // Insert a bitcast as needed.
1907     llvm::Value *V = RV.getScalarVal();
1908     if (Args.size() < IRFuncTy->getNumParams() &&
1909         V->getType() != IRFuncTy->getParamType(Args.size()))
1910       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1911 
1912     Args.push_back(V);
1913   }
1914 }
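// For example (illustrative): expanding
//   struct Pt { int x; float y; };
// pushes two IR arguments (i32, float); a union expands only its largest
// member, and a constant array expands element by element.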
1915 
1916 
1917 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1918                                  llvm::Value *Callee,
1919                                  ReturnValueSlot ReturnValue,
1920                                  const CallArgList &CallArgs,
1921                                  const Decl *TargetDecl,
1922                                  llvm::Instruction **callOrInvoke) {
1923   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1924   SmallVector<llvm::Value*, 16> Args;
1925 
1926   // Handle struct-return functions by passing a pointer to the
1927   // location that we would like to return into.
1928   QualType RetTy = CallInfo.getReturnType();
1929   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1930 
1931   // IRArgNo - Keep track of the argument number in the callee we're looking at.
1932   unsigned IRArgNo = 0;
1933   llvm::FunctionType *IRFuncTy =
1934     cast<llvm::FunctionType>(
1935                   cast<llvm::PointerType>(Callee->getType())->getElementType());
1936 
1937   // If the call returns a temporary with struct return, create a temporary
1938   // alloca to hold the result, unless one is given to us.
1939   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1940     llvm::Value *Value = ReturnValue.getValue();
1941     if (!Value)
1942       Value = CreateMemTemp(RetTy);
1943     Args.push_back(Value);
1944     checkArgMatches(Value, IRArgNo, IRFuncTy);
1945   }
1946 
1947   assert(CallInfo.arg_size() == CallArgs.size() &&
1948          "Mismatch between function signature & arguments.");
1949   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1950   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1951        I != E; ++I, ++info_it) {
1952     const ABIArgInfo &ArgInfo = info_it->info;
1953     RValue RV = I->RV;
1954 
1955     unsigned TypeAlign =
1956       getContext().getTypeAlignInChars(I->Ty).getQuantity();
1957     switch (ArgInfo.getKind()) {
1958     case ABIArgInfo::Indirect: {
1959       if (RV.isScalar() || RV.isComplex()) {
1960         // Make a temporary alloca to pass the argument.
1961         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1962         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
1963           AI->setAlignment(ArgInfo.getIndirectAlign());
1964         Args.push_back(AI);
1965 
1966         if (RV.isScalar())
1967           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
1968                             TypeAlign, I->Ty);
1969         else
1970           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1971 
1972         // Validate argument match.
1973         checkArgMatches(AI, IRArgNo, IRFuncTy);
1974       } else {
1975         // We want to avoid creating an unnecessary temporary+copy here;
1976         // however, we need one in two cases:
1977         // 1. If the argument is not byval, and we are required to copy the
1978         //    source.  (This case doesn't occur on any common architecture.)
1979         // 2. If the argument is byval, RV is not sufficiently aligned, and
1980         //    we cannot force it to be sufficiently aligned.
1981         llvm::Value *Addr = RV.getAggregateAddr();
1982         unsigned Align = ArgInfo.getIndirectAlign();
1983         const llvm::TargetData *TD = &CGM.getTargetData();
1984         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
1985             (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
1986              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
1987           // Create an aligned temporary, and copy to it.
1988           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1989           if (Align > AI->getAlignment())
1990             AI->setAlignment(Align);
1991           Args.push_back(AI);
1992           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
1993 
1994           // Validate argument match.
1995           checkArgMatches(AI, IRArgNo, IRFuncTy);
1996         } else {
1997           // Skip the extra memcpy call.
1998           Args.push_back(Addr);
1999 
2000           // Validate argument match.
2001           checkArgMatches(Addr, IRArgNo, IRFuncTy);
2002         }
2003       }
2004       break;
2005     }
2006 
2007     case ABIArgInfo::Ignore:
2008       break;
2009 
2010     case ABIArgInfo::Extend:
2011     case ABIArgInfo::Direct: {
2012       // Insert a padding argument to ensure proper alignment.
2013       if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2014         Args.push_back(llvm::UndefValue::get(PaddingType));
2015         ++IRArgNo;
2016       }
2017 
2018       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2019           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2020           ArgInfo.getDirectOffset() == 0) {
2021         llvm::Value *V;
2022         if (RV.isScalar())
2023           V = RV.getScalarVal();
2024         else
2025           V = Builder.CreateLoad(RV.getAggregateAddr());
2026 
2027         // If the argument doesn't match, perform a bitcast to coerce it.  This
2028         // can happen due to trivial type mismatches.
2029         if (IRArgNo < IRFuncTy->getNumParams() &&
2030             V->getType() != IRFuncTy->getParamType(IRArgNo))
2031           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2032         Args.push_back(V);
2033 
2034         checkArgMatches(V, IRArgNo, IRFuncTy);
2035         break;
2036       }
2037 
2038       // FIXME: Avoid the conversion through memory if possible.
2039       llvm::Value *SrcPtr;
2040       if (RV.isScalar()) {
2041         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2042         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
2043       } else if (RV.isComplex()) {
2044         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2045         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
2046       } else
2047         SrcPtr = RV.getAggregateAddr();
2048 
2049       // If the value is offset in memory, apply the offset now.
2050       if (unsigned Offs = ArgInfo.getDirectOffset()) {
2051         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2052         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2053         SrcPtr = Builder.CreateBitCast(SrcPtr,
2054                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2055 
2056       }
2057 
2058       // If the coerce-to type is a first-class aggregate, we flatten it and
2059       // pass the elements. Either way is semantically identical, but fast-isel
2060       // and the optimizer generally like scalar values better than FCAs.
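      // (This mirrors the flattening of incoming arguments in the function
      // prologue above; e.g. a { double, double } coercion is passed as two
      // separate double arguments.)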
2061       if (llvm::StructType *STy =
2062             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2063         SrcPtr = Builder.CreateBitCast(SrcPtr,
2064                                        llvm::PointerType::getUnqual(STy));
2065         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2066           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2067           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2068           // We don't know what we're loading from.
2069           LI->setAlignment(1);
2070           Args.push_back(LI);
2071 
2072           // Validate argument match.
2073           checkArgMatches(LI, IRArgNo, IRFuncTy);
2074         }
2075       } else {
2076         // In the simple case, just pass the coerced loaded value.
2077         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2078                                          *this));
2079 
2080         // Validate argument match.
2081         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2082       }
2083 
2084       break;
2085     }
2086 
2087     case ABIArgInfo::Expand:
2088       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2089       IRArgNo = Args.size();
2090       break;
2091     }
2092   }
2093 
2094   // If the callee is a bitcast of a function to a varargs pointer to function
2095   // type, check to see if we can remove the bitcast.  This handles some cases
2096   // with unprototyped functions.
2097   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2098     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2099       llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2100       llvm::FunctionType *CurFT =
2101         cast<llvm::FunctionType>(CurPT->getElementType());
2102       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2103 
2104       if (CE->getOpcode() == llvm::Instruction::BitCast &&
2105           ActualFT->getReturnType() == CurFT->getReturnType() &&
2106           ActualFT->getNumParams() == CurFT->getNumParams() &&
2107           ActualFT->getNumParams() == Args.size() &&
2108           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2109         bool ArgsMatch = true;
2110         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2111           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2112             ArgsMatch = false;
2113             break;
2114           }
2115 
2116         // Strip the cast if we can get away with it.  This is a nice cleanup,
2117         // but also allows us to inline the function at -O0 if it is marked
2118         // always_inline.
2119         if (ArgsMatch)
2120           Callee = CalleeF;
2121       }
2122     }
2123 
2124   unsigned CallingConv;
2125   CodeGen::AttributeListType AttributeList;
2126   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2127   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList);
2128 
2129   llvm::BasicBlock *InvokeDest = 0;
2130   if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
2131     InvokeDest = getInvokeDest();
2132 
2133   llvm::CallSite CS;
2134   if (!InvokeDest) {
2135     CS = Builder.CreateCall(Callee, Args);
2136   } else {
2137     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2138     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2139     EmitBlock(Cont);
2140   }
2141   if (callOrInvoke)
2142     *callOrInvoke = CS.getInstruction();
2143 
2144   CS.setAttributes(Attrs);
2145   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2146 
2147   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2148   // optimizer it can aggressively ignore unwind edges.
2149   if (CGM.getLangOpts().ObjCAutoRefCount)
2150     AddObjCARCExceptionMetadata(CS.getInstruction());
2151 
2152   // If the call doesn't return, finish the basic block and clear the
2153   // insertion point; this allows the rest of IRgen to discard
2154   // unreachable code.
2155   if (CS.doesNotReturn()) {
2156     Builder.CreateUnreachable();
2157     Builder.ClearInsertionPoint();
2158 
2159     // FIXME: For now, emit a dummy basic block because expr emitters in
2160     // general are not ready to handle emitting expressions at unreachable
2161     // points.
2162     EnsureInsertPoint();
2163 
2164     // Return a reasonable RValue.
2165     return GetUndefRValue(RetTy);
2166   }
2167 
2168   llvm::Instruction *CI = CS.getInstruction();
2169   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2170     CI->setName("call");
2171 
2172   // Emit any writebacks immediately.  Arguably this should happen
2173   // after any return-value munging.
2174   if (CallArgs.hasWritebacks())
2175     emitWritebacks(*this, CallArgs);
2176 
2177   switch (RetAI.getKind()) {
2178   case ABIArgInfo::Indirect: {
2179     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2180     if (RetTy->isAnyComplexType())
2181       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2182     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2183       return RValue::getAggregate(Args[0]);
2184     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2185   }
2186 
2187   case ABIArgInfo::Ignore:
2188     // Even though the return value is ignored, we still need to construct
2189     // an appropriate (undef) return value for our caller.
2190     return GetUndefRValue(RetTy);
2191 
2192   case ABIArgInfo::Extend:
2193   case ABIArgInfo::Direct: {
2194     llvm::Type *RetIRTy = ConvertType(RetTy);
2195     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2196       if (RetTy->isAnyComplexType()) {
2197         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2198         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2199         return RValue::getComplex(std::make_pair(Real, Imag));
2200       }
2201       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2202         llvm::Value *DestPtr = ReturnValue.getValue();
2203         bool DestIsVolatile = ReturnValue.isVolatile();
2204 
2205         if (!DestPtr) {
2206           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2207           DestIsVolatile = false;
2208         }
2209         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2210         return RValue::getAggregate(DestPtr);
2211       }
2212 
2213       // If the value doesn't match the expected return type, perform a
2214       // bitcast to coerce it.  This can happen due to trivial type mismatches.
2215       llvm::Value *V = CI;
2216       if (V->getType() != RetIRTy)
2217         V = Builder.CreateBitCast(V, RetIRTy);
2218       return RValue::get(V);
2219     }
2220 
2221     llvm::Value *DestPtr = ReturnValue.getValue();
2222     bool DestIsVolatile = ReturnValue.isVolatile();
2223 
2224     if (!DestPtr) {
2225       DestPtr = CreateMemTemp(RetTy, "coerce");
2226       DestIsVolatile = false;
2227     }
2228 
2229     // If the value is offset in memory, apply the offset now.
2230     llvm::Value *StorePtr = DestPtr;
2231     if (unsigned Offs = RetAI.getDirectOffset()) {
2232       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2233       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2234       StorePtr = Builder.CreateBitCast(StorePtr,
2235                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2236     }
2237     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2238 
2239     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2240     if (RetTy->isAnyComplexType())
2241       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2242     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2243       return RValue::getAggregate(DestPtr);
2244     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2245   }
2246 
2247   case ABIArgInfo::Expand:
2248     llvm_unreachable("Invalid ABI kind for return argument");
2249   }
2250 
2251   llvm_unreachable("Unhandled ABIArgInfo::Kind");
2252 }
2253 
2254 /* VarArg handling */
2255 
2256 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2257   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2258 }
2259