1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "CGCXXABI.h"
17 #include "ABIInfo.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Attributes.h"
26 #include "llvm/Support/CallSite.h"
27 #include "llvm/Target/TargetData.h"
28 #include "llvm/InlineAsm.h"
29 #include "llvm/Transforms/Utils/Local.h"
30 using namespace clang;
31 using namespace CodeGen;
32 
33 /***/
34 
35 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
36   switch (CC) {
37   default: return llvm::CallingConv::C;
38   case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
39   case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
40   case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
41   case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
42   case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
43   // TODO: add support for CC_X86Pascal to llvm
44   }
45 }
46 
47 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
48 /// qualification.
49 /// FIXME: address space qualification?
50 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
51   QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
52   return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
53 }
54 
55 /// Returns the canonical formal type of the given C++ method.
56 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
57   return MD->getType()->getCanonicalTypeUnqualified()
58            .getAs<FunctionProtoType>();
59 }
60 
61 /// Returns the "extra-canonicalized" return type, which discards
62 /// qualifiers on the return type.  Codegen doesn't care about them,
63 /// and it makes ABI code a little easier to be able to assume that
64 /// all parameter and return types are top-level unqualified.
65 static CanQualType GetReturnType(QualType RetTy) {
66   return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
67 }
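// Worked example (hypothetical type, for illustration only): a function
// declared to return 'const int' is handled here as returning plain 'int',
// so the ABI classification code never has to reason about top-level
// qualifiers on return types.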
68 
69 const CGFunctionInfo &
70 CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
71   return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
72                          llvm::SmallVector<CanQualType, 16>(),
73                          FTNP->getExtInfo());
74 }
75 
76 /// \param Args - contains any initial parameters besides those
77 ///   in the formal type
78 static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
79                                   llvm::SmallVectorImpl<CanQualType> &ArgTys,
80                                              CanQual<FunctionProtoType> FTP) {
81   // FIXME: Kill copy.
82   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
83     ArgTys.push_back(FTP->getArgType(i));
84   CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
85   return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
86 }
87 
88 const CGFunctionInfo &
89 CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
90   llvm::SmallVector<CanQualType, 16> ArgTys;
91   return ::getFunctionInfo(*this, ArgTys, FTP);
92 }
93 
94 static CallingConv getCallingConventionForDecl(const Decl *D) {
95   // Set the appropriate calling convention for the Function.
96   if (D->hasAttr<StdCallAttr>())
97     return CC_X86StdCall;
98 
99   if (D->hasAttr<FastCallAttr>())
100     return CC_X86FastCall;
101 
102   if (D->hasAttr<ThisCallAttr>())
103     return CC_X86ThisCall;
104 
105   if (D->hasAttr<PascalAttr>())
106     return CC_X86Pascal;
107 
108   if (PcsAttr *PCS = D->getAttr<PcsAttr>())
109     return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
110 
111   return CC_C;
112 }
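// Worked example (hypothetical declaration, for illustration only): for
//   void f(void) __attribute__((fastcall));
// the FastCallAttr makes this helper return CC_X86FastCall, which
// ClangCallConvToLLVMCallConv above then maps to
// llvm::CallingConv::X86_FastCall.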
113 
114 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
115                                                  const FunctionProtoType *FTP) {
116   llvm::SmallVector<CanQualType, 16> ArgTys;
117 
118   // Add the 'this' pointer.
119   ArgTys.push_back(GetThisType(Context, RD));
120 
121   return ::getFunctionInfo(*this, ArgTys,
122               FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
123 }
124 
125 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
126   llvm::SmallVector<CanQualType, 16> ArgTys;
127 
128   assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
129   assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
130 
131   // Add the 'this' pointer unless this is a static method.
132   if (MD->isInstance())
133     ArgTys.push_back(GetThisType(Context, MD->getParent()));
134 
135   return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
136 }
137 
138 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
139                                                     CXXCtorType Type) {
140   llvm::SmallVector<CanQualType, 16> ArgTys;
141   ArgTys.push_back(GetThisType(Context, D->getParent()));
142   CanQualType ResTy = Context.VoidTy;
143 
144   TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
145 
146   CanQual<FunctionProtoType> FTP = GetFormalType(D);
147 
148   // Add the formal parameters.
149   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
150     ArgTys.push_back(FTP->getArgType(i));
151 
152   return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
153 }
154 
155 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
156                                                     CXXDtorType Type) {
157   llvm::SmallVector<CanQualType, 2> ArgTys;
158   ArgTys.push_back(GetThisType(Context, D->getParent()));
159   CanQualType ResTy = Context.VoidTy;
160 
161   TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
162 
163   CanQual<FunctionProtoType> FTP = GetFormalType(D);
164   assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
165 
166   return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
167 }
168 
169 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
170   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
171     if (MD->isInstance())
172       return getFunctionInfo(MD);
173 
174   CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
175   assert(isa<FunctionType>(FTy));
176   if (isa<FunctionNoProtoType>(FTy))
177     return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
178   assert(isa<FunctionProtoType>(FTy));
179   return getFunctionInfo(FTy.getAs<FunctionProtoType>());
180 }
181 
182 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
183   llvm::SmallVector<CanQualType, 16> ArgTys;
184   ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
185   ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
186   // FIXME: Kill copy?
187   for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
188          e = MD->param_end(); i != e; ++i) {
189     ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
190   }
191 
192   FunctionType::ExtInfo einfo;
193   einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
194 
195   if (getContext().getLangOptions().ObjCAutoRefCount &&
196       MD->hasAttr<NSReturnsRetainedAttr>())
197     einfo = einfo.withProducesResult(true);
198 
199   return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
200 }
201 
202 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
203   // FIXME: Do we need to handle ObjCMethodDecl?
204   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
205 
206   if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
207     return getFunctionInfo(CD, GD.getCtorType());
208 
209   if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
210     return getFunctionInfo(DD, GD.getDtorType());
211 
212   return getFunctionInfo(FD);
213 }
214 
215 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
216                                                     const CallArgList &Args,
217                                             const FunctionType::ExtInfo &Info) {
218   // FIXME: Kill copy.
219   llvm::SmallVector<CanQualType, 16> ArgTys;
220   for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
221        i != e; ++i)
222     ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
223   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
224 }
225 
226 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
227                                                     const FunctionArgList &Args,
228                                             const FunctionType::ExtInfo &Info) {
229   // FIXME: Kill copy.
230   llvm::SmallVector<CanQualType, 16> ArgTys;
231   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
232        i != e; ++i)
233     ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
234   return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
235 }
236 
237 const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
238   llvm::SmallVector<CanQualType, 1> args;
239   return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
240 }
241 
242 const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
243                            const llvm::SmallVectorImpl<CanQualType> &ArgTys,
244                                             const FunctionType::ExtInfo &Info) {
245 #ifndef NDEBUG
246   for (llvm::SmallVectorImpl<CanQualType>::const_iterator
247          I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
248     assert(I->isCanonicalAsParam());
249 #endif
250 
251   unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
252 
253   // Lookup or create unique function info.
254   llvm::FoldingSetNodeID ID;
255   CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
256 
257   void *InsertPos = 0;
258   CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
259   if (FI)
260     return *FI;
261 
262   // Construct the function info.
263   FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
264                           Info.getHasRegParm(), Info.getRegParm(), ResTy,
265                           ArgTys.data(), ArgTys.size());
266   FunctionInfos.InsertNode(FI, InsertPos);
267 
268   bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
269   assert(Inserted && "Recursively being processed?");
270 
271   // Compute ABI information.
272   getABIInfo().computeInfo(*FI);
273 
274   // Loop over all of the computed argument and return value info.  If any of
275   // them are direct or extend without a specified coerce type, specify the
276   // default now.
277   ABIArgInfo &RetInfo = FI->getReturnInfo();
278   if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
279     RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
280 
281   for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
282        I != E; ++I)
283     if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
284       I->info.setCoerceToType(ConvertType(I->type));
285 
286   bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
287   assert(Erased && "Not in set?");
288 
289   return *FI;
290 }
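// Note on the code above: the FoldingSet acts as a memoization cache.
// CGFunctionInfo nodes are profiled on the ExtInfo (calling convention,
// noreturn, regparm, etc.) plus the canonical result and argument types, so
// a given signature is run through the ABIInfo only once and later queries
// return the same node; FunctionsBeingProcessed merely asserts that lowering
// never re-enters itself for a signature whose ABI info is still being
// computed.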
291 
292 CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
293                                bool _NoReturn, bool returnsRetained,
294                                bool _HasRegParm, unsigned _RegParm,
295                                CanQualType ResTy,
296                                const CanQualType *ArgTys,
297                                unsigned NumArgTys)
298   : CallingConvention(_CallingConvention),
299     EffectiveCallingConvention(_CallingConvention),
300     NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
301     HasRegParm(_HasRegParm), RegParm(_RegParm)
302 {
303   NumArgs = NumArgTys;
304 
305   // FIXME: Coallocate with the CGFunctionInfo object.
306   Args = new ArgInfo[1 + NumArgTys];
307   Args[0].type = ResTy;
308   for (unsigned i = 0; i != NumArgTys; ++i)
309     Args[1 + i].type = ArgTys[i];
310 }
311 
312 /***/
313 
314 void CodeGenTypes::GetExpandedTypes(QualType type,
315                      llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) {
316   const RecordType *RT = type->getAsStructureType();
317   assert(RT && "Can only expand structure types.");
318   const RecordDecl *RD = RT->getDecl();
319   assert(!RD->hasFlexibleArrayMember() &&
320          "Cannot expand structure with flexible array.");
321 
322   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
323          i != e; ++i) {
324     const FieldDecl *FD = *i;
325     assert(!FD->isBitField() &&
326            "Cannot expand structure with bit-field members.");
327 
328     QualType fieldType = FD->getType();
329     if (fieldType->isRecordType())
330       GetExpandedTypes(fieldType, expandedTypes);
331     else
332       expandedTypes.push_back(ConvertType(fieldType));
333   }
334 }
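// Worked example (hypothetical type, for illustration only): expanding
//   struct Pair { int i; struct { float f; } nested; };
// recurses into 'nested' and yields the flat list { i32, float }; bit-field
// members and flexible array members are rejected by the asserts above.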
335 
336 llvm::Function::arg_iterator
337 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
338                                     llvm::Function::arg_iterator AI) {
339   const RecordType *RT = Ty->getAsStructureType();
340   assert(RT && "Can only expand structure types.");
341 
342   RecordDecl *RD = RT->getDecl();
343   assert(LV.isSimple() &&
344          "Unexpected non-simple lvalue during struct expansion.");
345   llvm::Value *Addr = LV.getAddress();
346   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
347          i != e; ++i) {
348     FieldDecl *FD = *i;
349     QualType FT = FD->getType();
350 
351     // FIXME: What are the right qualifiers here?
352     LValue LV = EmitLValueForField(Addr, FD, 0);
353     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
354       AI = ExpandTypeFromArgs(FT, LV, AI);
355     } else {
356       EmitStoreThroughLValue(RValue::get(AI), LV);
357       ++AI;
358     }
359   }
360 
361   return AI;
362 }
363 
364 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
365 /// accessing some number of bytes out of it, try to gep into the struct to get
366 /// at its inner goodness.  Dive as deep as possible without entering an element
367 /// with an in-memory size smaller than DstSize.
368 static llvm::Value *
369 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
370                                    llvm::StructType *SrcSTy,
371                                    uint64_t DstSize, CodeGenFunction &CGF) {
372   // We can't dive into a zero-element struct.
373   if (SrcSTy->getNumElements() == 0) return SrcPtr;
374 
375   llvm::Type *FirstElt = SrcSTy->getElementType(0);
376 
377   // If the first elt is at least as large as what we're looking for, or if the
378   // first element is the same size as the whole struct, we can enter it.
379   uint64_t FirstEltSize =
380     CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
381   if (FirstEltSize < DstSize &&
382       FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
383     return SrcPtr;
384 
385   // GEP into the first element.
386   SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
387 
388   // If the first element is a struct, recurse.
389   llvm::Type *SrcTy =
390     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
391   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
392     return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
393 
394   return SrcPtr;
395 }
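// Worked example (hypothetical types, for illustration only): with
// SrcSTy = { { i32, i32 } } and DstSize = 8, the 8-byte first element is
// entered via the "coerce.dive" GEP and the recursion stops at the inner
// { i32, i32 }; with DstSize = 4 it would dive one level further, down to
// the leading i32.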
396 
397 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
398 /// are either integers or pointers.  This does a truncation of the value if it
399 /// is too large or a zero extension if it is too small.
400 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
401                                              llvm::Type *Ty,
402                                              CodeGenFunction &CGF) {
403   if (Val->getType() == Ty)
404     return Val;
405 
406   if (isa<llvm::PointerType>(Val->getType())) {
407     // If this is Pointer->Pointer avoid conversion to and from int.
408     if (isa<llvm::PointerType>(Ty))
409       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
410 
411     // Convert the pointer to an integer so we can play with its width.
412     Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
413   }
414 
415   llvm::Type *DestIntTy = Ty;
416   if (isa<llvm::PointerType>(DestIntTy))
417     DestIntTy = CGF.IntPtrTy;
418 
419   if (Val->getType() != DestIntTy)
420     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
421 
422   if (isa<llvm::PointerType>(Ty))
423     Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
424   return Val;
425 }
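// Worked example (illustrative, assuming a 64-bit IntPtrTy): coercing an
// i8* value to i32 emits ptrtoint to i64 ("coerce.val.pi") followed by a
// truncating IntCast to i32 ("coerce.val.ii"); coercing an i32 value to i8*
// zero-extends to i64 first and then emits inttoptr ("coerce.val.ip").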
426 
427 
428 
429 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
430 /// a pointer to an object of type \arg Ty.
431 ///
432 /// This safely handles the case when the src type is smaller than the
433 /// destination type; in this situation the values of bits which are not
434 /// present in the src are undefined.
435 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
436                                       llvm::Type *Ty,
437                                       CodeGenFunction &CGF) {
438   llvm::Type *SrcTy =
439     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
440 
441   // If SrcTy and Ty are the same, just do a load.
442   if (SrcTy == Ty)
443     return CGF.Builder.CreateLoad(SrcPtr);
444 
445   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
446 
447   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
448     SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
449     SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
450   }
451 
452   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
453 
454   // If the source and destination are integer or pointer types, just do an
455   // extension or truncation to the desired type.
456   if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
457       (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
458     llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
459     return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
460   }
461 
462   // If load is legal, just bitcast the src pointer.
463   if (SrcSize >= DstSize) {
464     // Generally SrcSize is never greater than DstSize, since this means we are
465     // losing bits. However, this can happen in cases where the structure has
466     // additional padding, for example due to a user specified alignment.
467     //
468     // FIXME: Assert that we aren't truncating non-padding bits when we have access
469     // to that information.
470     llvm::Value *Casted =
471       CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
472     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
473     // FIXME: Use better alignment / avoid requiring aligned load.
474     Load->setAlignment(1);
475     return Load;
476   }
477 
478   // Otherwise do coercion through memory. This is stupid, but
479   // simple.
480   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
481   llvm::Value *Casted =
482     CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
483   llvm::StoreInst *Store =
484     CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
485   // FIXME: Use better alignment / avoid requiring aligned store.
486   Store->setAlignment(1);
487   return CGF.Builder.CreateLoad(Tmp);
488 }
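// Worked example (hypothetical types, for illustration only): loading a
// 12-byte { i32, i32, i32 } source as a 16-byte { i64, i32 } destination
// matches none of the fast paths above (the types differ, neither side is a
// scalar int/pointer, and SrcSize < DstSize), so the value is bounced
// through the temporary alloca at the end.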
489 
490 // Function to store a first-class aggregate into memory.  We prefer to
491 // store the elements rather than the aggregate to be more friendly to
492 // fast-isel.
493 // FIXME: Do we need to recurse here?
494 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
495                           llvm::Value *DestPtr, bool DestIsVolatile,
496                           bool LowAlignment) {
497   // Prefer scalar stores to first-class aggregate stores.
498   if (llvm::StructType *STy =
499         dyn_cast<llvm::StructType>(Val->getType())) {
500     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
501       llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
502       llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
503       llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
504                                                     DestIsVolatile);
505       if (LowAlignment)
506         SI->setAlignment(1);
507     }
508   } else {
509     CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
510   }
511 }
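// Worked example (hypothetical value, for illustration only): storing a
// value of type { i64, i32 } is emitted as two scalar stores through
// constant GEPs (one extractvalue + store per element) rather than a single
// first-class aggregate store, which fast-isel handles better.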
512 
513 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
514 /// where the source and destination may have different types.
515 ///
516 /// This safely handles the case when the src type is larger than the
517 /// destination type; the upper bits of the src will be lost.
518 static void CreateCoercedStore(llvm::Value *Src,
519                                llvm::Value *DstPtr,
520                                bool DstIsVolatile,
521                                CodeGenFunction &CGF) {
522   llvm::Type *SrcTy = Src->getType();
523   llvm::Type *DstTy =
524     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
525   if (SrcTy == DstTy) {
526     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
527     return;
528   }
529 
530   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
531 
532   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
533     DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
534     DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
535   }
536 
537   // If the source and destination are integer or pointer types, just do an
538   // extension or truncation to the desired type.
539   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
540       (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
541     Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
542     CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
543     return;
544   }
545 
546   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
547 
548   // If store is legal, just bitcast the src pointer.
549   if (SrcSize <= DstSize) {
550     llvm::Value *Casted =
551       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
552     // FIXME: Use better alignment / avoid requiring aligned store.
553     BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
554   } else {
555     // Otherwise do coercion through memory. This is stupid, but
556     // simple.
557 
558     // Generally SrcSize is never greater than DstSize, since this means we are
559     // losing bits. However, this can happen in cases where the structure has
560     // additional padding, for example due to a user specified alignment.
561     //
562     // FIXME: Assert that we aren't truncating non-padding bits when we have access
563     // to that information.
564     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
565     CGF.Builder.CreateStore(Src, Tmp);
566     llvm::Value *Casted =
567       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
568     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
569     // FIXME: Use better alignment / avoid requiring aligned load.
570     Load->setAlignment(1);
571     CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
572   }
573 }
574 
575 /***/
576 
577 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
578   return FI.getReturnInfo().isIndirect();
579 }
580 
581 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
582   if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
583     switch (BT->getKind()) {
584     default:
585       return false;
586     case BuiltinType::Float:
587       return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
588     case BuiltinType::Double:
589       return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
590     case BuiltinType::LongDouble:
591       return getContext().Target.useObjCFPRetForRealType(
592         TargetInfo::LongDouble);
593     }
594   }
595 
596   return false;
597 }
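// Usage note (illustrative, hedged): targets answer true here when an
// Objective-C message send returning the given real type should go through
// an objc_msgSend_fpret-style entry point (such as 32-bit x86); the ObjC
// message-send lowering elsewhere in CodeGen consults this query when
// picking the runtime function.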
598 
599 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
600   const CGFunctionInfo &FI = getFunctionInfo(GD);
601 
602   // For definition purposes, don't consider a K&R function variadic.
603   bool Variadic = false;
604   if (const FunctionProtoType *FPT =
605         cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
606     Variadic = FPT->isVariadic();
607 
608   return GetFunctionType(FI, Variadic);
609 }
610 
611 llvm::FunctionType *
612 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
613 
614   bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
615   assert(Inserted && "Recursively being processed?");
616 
617   llvm::SmallVector<llvm::Type*, 8> argTypes;
618   llvm::Type *resultType = 0;
619 
620   const ABIArgInfo &retAI = FI.getReturnInfo();
621   switch (retAI.getKind()) {
622   case ABIArgInfo::Expand:
623     llvm_unreachable("Invalid ABI kind for return argument");
624 
625   case ABIArgInfo::Extend:
626   case ABIArgInfo::Direct:
627     resultType = retAI.getCoerceToType();
628     break;
629 
630   case ABIArgInfo::Indirect: {
631     assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
632     resultType = llvm::Type::getVoidTy(getLLVMContext());
633 
634     QualType ret = FI.getReturnType();
635     llvm::Type *ty = ConvertType(ret);
636     unsigned addressSpace = Context.getTargetAddressSpace(ret);
637     argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
638     break;
639   }
640 
641   case ABIArgInfo::Ignore:
642     resultType = llvm::Type::getVoidTy(getLLVMContext());
643     break;
644   }
645 
646   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
647          ie = FI.arg_end(); it != ie; ++it) {
648     const ABIArgInfo &argAI = it->info;
649 
650     switch (argAI.getKind()) {
651     case ABIArgInfo::Ignore:
652       break;
653 
654     case ABIArgInfo::Indirect: {
655       // indirect arguments are always on the stack, which is addr space #0.
656       llvm::Type *LTy = ConvertTypeForMem(it->type);
657       argTypes.push_back(LTy->getPointerTo());
658       break;
659     }
660 
661     case ABIArgInfo::Extend:
662     case ABIArgInfo::Direct: {
663       // If the coerce-to type is a first class aggregate, flatten it.  Either
664       // way is semantically identical, but fast-isel and the optimizer
665       // generally likes scalar values better than FCAs.
666       llvm::Type *argType = argAI.getCoerceToType();
667       if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
668         for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
669           argTypes.push_back(st->getElementType(i));
670       } else {
671         argTypes.push_back(argType);
672       }
673       break;
674     }
675 
676     case ABIArgInfo::Expand:
677       GetExpandedTypes(it->type, argTypes);
678       break;
679     }
680   }
681 
682   bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
683   assert(Erased && "Not in set?");
684 
685   return llvm::FunctionType::get(resultType, argTypes, isVariadic);
686 }
687 
688 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
689   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
690   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
691 
692   if (!isFuncTypeConvertible(FPT))
693     return llvm::StructType::get(getLLVMContext());
694 
695   const CGFunctionInfo *Info;
696   if (isa<CXXDestructorDecl>(MD))
697     Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
698   else
699     Info = &getFunctionInfo(MD);
700   return GetFunctionType(*Info, FPT->isVariadic());
701 }
702 
703 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
704                                            const Decl *TargetDecl,
705                                            AttributeListType &PAL,
706                                            unsigned &CallingConv) {
707   unsigned FuncAttrs = 0;
708   unsigned RetAttrs = 0;
709 
710   CallingConv = FI.getEffectiveCallingConvention();
711 
712   if (FI.isNoReturn())
713     FuncAttrs |= llvm::Attribute::NoReturn;
714 
715   // FIXME: handle sseregparm someday...
716   if (TargetDecl) {
717     if (TargetDecl->hasAttr<NoThrowAttr>())
718       FuncAttrs |= llvm::Attribute::NoUnwind;
719     else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
720       const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
721       if (FPT && FPT->isNothrow(getContext()))
722         FuncAttrs |= llvm::Attribute::NoUnwind;
723     }
724 
725     if (TargetDecl->hasAttr<NoReturnAttr>())
726       FuncAttrs |= llvm::Attribute::NoReturn;
727     if (TargetDecl->hasAttr<ConstAttr>())
728       FuncAttrs |= llvm::Attribute::ReadNone;
729     else if (TargetDecl->hasAttr<PureAttr>())
730       FuncAttrs |= llvm::Attribute::ReadOnly;
731     if (TargetDecl->hasAttr<MallocAttr>())
732       RetAttrs |= llvm::Attribute::NoAlias;
733   }
734 
735   if (CodeGenOpts.OptimizeSize)
736     FuncAttrs |= llvm::Attribute::OptimizeForSize;
737   if (CodeGenOpts.DisableRedZone)
738     FuncAttrs |= llvm::Attribute::NoRedZone;
739   if (CodeGenOpts.NoImplicitFloat)
740     FuncAttrs |= llvm::Attribute::NoImplicitFloat;
741 
742   QualType RetTy = FI.getReturnType();
743   unsigned Index = 1;
744   const ABIArgInfo &RetAI = FI.getReturnInfo();
745   switch (RetAI.getKind()) {
746   case ABIArgInfo::Extend:
747    if (RetTy->hasSignedIntegerRepresentation())
748      RetAttrs |= llvm::Attribute::SExt;
749    else if (RetTy->hasUnsignedIntegerRepresentation())
750      RetAttrs |= llvm::Attribute::ZExt;
751     break;
752   case ABIArgInfo::Direct:
753   case ABIArgInfo::Ignore:
754     break;
755 
756   case ABIArgInfo::Indirect:
757     PAL.push_back(llvm::AttributeWithIndex::get(Index,
758                                                 llvm::Attribute::StructRet));
759     ++Index;
760     // sret disables readnone and readonly
761     FuncAttrs &= ~(llvm::Attribute::ReadOnly |
762                    llvm::Attribute::ReadNone);
763     break;
764 
765   case ABIArgInfo::Expand:
766     assert(0 && "Invalid ABI kind for return argument");
767   }
768 
769   if (RetAttrs)
770     PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
771 
772   // FIXME: RegParm should be reduced in case of global register variable.
773   signed RegParm;
774   if (FI.getHasRegParm())
775     RegParm = FI.getRegParm();
776   else
777     RegParm = CodeGenOpts.NumRegisterParameters;
778 
779   unsigned PointerWidth = getContext().Target.getPointerWidth(0);
780   for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
781          ie = FI.arg_end(); it != ie; ++it) {
782     QualType ParamType = it->type;
783     const ABIArgInfo &AI = it->info;
784     unsigned Attributes = 0;
785 
786     // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
787     // have the corresponding parameter variable.  It doesn't make
788     // sense to do it here because parameters are so messed up.
789     switch (AI.getKind()) {
790     case ABIArgInfo::Extend:
791       if (ParamType->isSignedIntegerOrEnumerationType())
792         Attributes |= llvm::Attribute::SExt;
793       else if (ParamType->isUnsignedIntegerOrEnumerationType())
794         Attributes |= llvm::Attribute::ZExt;
795       // FALL THROUGH
796     case ABIArgInfo::Direct:
797       if (RegParm > 0 &&
798           (ParamType->isIntegerType() || ParamType->isPointerType())) {
799         RegParm -=
800         (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
801         if (RegParm >= 0)
802           Attributes |= llvm::Attribute::InReg;
803       }
804       // FIXME: handle sseregparm someday...
805 
806       if (llvm::StructType *STy =
807             dyn_cast<llvm::StructType>(AI.getCoerceToType()))
808         Index += STy->getNumElements()-1;  // 1 will be added below.
809       break;
810 
811     case ABIArgInfo::Indirect:
812       if (AI.getIndirectByVal())
813         Attributes |= llvm::Attribute::ByVal;
814 
815       Attributes |=
816         llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
817       // byval disables readnone and readonly.
818       FuncAttrs &= ~(llvm::Attribute::ReadOnly |
819                      llvm::Attribute::ReadNone);
820       break;
821 
822     case ABIArgInfo::Ignore:
823       // Skip increment, no matching LLVM parameter.
824       continue;
825 
826     case ABIArgInfo::Expand: {
827       llvm::SmallVector<llvm::Type*, 8> types;
828       // FIXME: This is rather inefficient. Do we ever actually need to do
829       // anything here? The result should be just reconstructed on the other
830       // side, so extension should be a non-issue.
831       getTypes().GetExpandedTypes(ParamType, types);
832       Index += types.size();
833       continue;
834     }
835     }
836 
837     if (Attributes)
838       PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
839     ++Index;
840   }
841   if (FuncAttrs)
842     PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
843 }
844 
845 /// An argument came in as a promoted argument; demote it back to its
846 /// declared type.
847 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
848                                          const VarDecl *var,
849                                          llvm::Value *value) {
850   llvm::Type *varType = CGF.ConvertType(var->getType());
851 
852   // This can happen with promotions that actually don't change the
853   // underlying type, like the enum promotions.
854   if (value->getType() == varType) return value;
855 
856   assert((varType->isIntegerTy() || varType->isFloatingPointTy())
857          && "unexpected promotion type");
858 
859   if (isa<llvm::IntegerType>(varType))
860     return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
861 
862   return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
863 }
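// Worked example (hypothetical parameter, for illustration only): a
// K&R-style 'float' parameter arrives promoted to double and is FP-cast
// back down to float here; a promoted 'short' arrives as i32 and takes the
// CreateTrunc path back to i16.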
864 
865 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
866                                          llvm::Function *Fn,
867                                          const FunctionArgList &Args) {
868   // If this is an implicit-return-zero function, go ahead and
869   // initialize the return value.  TODO: it might be nice to have
870   // a more general mechanism for this that didn't require synthesized
871   // return statements.
872   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
873     if (FD->hasImplicitReturnZero()) {
874       QualType RetTy = FD->getResultType().getUnqualifiedType();
875       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
876       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
877       Builder.CreateStore(Zero, ReturnValue);
878     }
879   }
880 
881   // FIXME: We no longer need the types from FunctionArgList; lift up and
882   // simplify.
883 
884   // Emit allocs for param decls.  Give the LLVM Argument nodes names.
885   llvm::Function::arg_iterator AI = Fn->arg_begin();
886 
887   // Name the struct return argument.
888   if (CGM.ReturnTypeUsesSRet(FI)) {
889     AI->setName("agg.result");
890     ++AI;
891   }
892 
893   assert(FI.arg_size() == Args.size() &&
894          "Mismatch between function signature & arguments.");
895   unsigned ArgNo = 1;
896   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
897   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
898        i != e; ++i, ++info_it, ++ArgNo) {
899     const VarDecl *Arg = *i;
900     QualType Ty = info_it->type;
901     const ABIArgInfo &ArgI = info_it->info;
902 
903     bool isPromoted =
904       isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
905 
906     switch (ArgI.getKind()) {
907     case ABIArgInfo::Indirect: {
908       llvm::Value *V = AI;
909 
910       if (hasAggregateLLVMType(Ty)) {
911         // Aggregates and complex variables are accessed by reference.  All we
912         // need to do is realign the value, if requested
913         if (ArgI.getIndirectRealign()) {
914           llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
915 
916           // Copy from the incoming argument pointer to the temporary with the
917           // appropriate alignment.
918           //
919           // FIXME: We should have a common utility for generating an aggregate
920           // copy.
921           llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
922           CharUnits Size = getContext().getTypeSizeInChars(Ty);
923           llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
924           llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
925           Builder.CreateMemCpy(Dst,
926                                Src,
927                                llvm::ConstantInt::get(IntPtrTy,
928                                                       Size.getQuantity()),
929                                ArgI.getIndirectAlign(),
930                                false);
931           V = AlignedTemp;
932         }
933       } else {
934         // Load scalar value from indirect argument.
935         CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
936         V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
937 
938         if (isPromoted)
939           V = emitArgumentDemotion(*this, Arg, V);
940       }
941       EmitParmDecl(*Arg, V, ArgNo);
942       break;
943     }
944 
945     case ABIArgInfo::Extend:
946     case ABIArgInfo::Direct: {
947       // If we have the trivial case, handle it with no muss and fuss.
948       if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
949           ArgI.getCoerceToType() == ConvertType(Ty) &&
950           ArgI.getDirectOffset() == 0) {
951         assert(AI != Fn->arg_end() && "Argument mismatch!");
952         llvm::Value *V = AI;
953 
954         if (Arg->getType().isRestrictQualified())
955           AI->addAttr(llvm::Attribute::NoAlias);
956 
957         // Ensure the argument is the correct type.
958         if (V->getType() != ArgI.getCoerceToType())
959           V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
960 
961         if (isPromoted)
962           V = emitArgumentDemotion(*this, Arg, V);
963 
964         EmitParmDecl(*Arg, V, ArgNo);
965         break;
966       }
967 
968       llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
969 
970       // The alignment we need to use is the max of the requested alignment for
971       // the argument plus the alignment required by our access code below.
972       unsigned AlignmentToUse =
973         CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
974       AlignmentToUse = std::max(AlignmentToUse,
975                         (unsigned)getContext().getDeclAlign(Arg).getQuantity());
976 
977       Alloca->setAlignment(AlignmentToUse);
978       llvm::Value *V = Alloca;
979       llvm::Value *Ptr = V;    // Pointer to store into.
980 
981       // If the value is offset in memory, apply the offset now.
982       if (unsigned Offs = ArgI.getDirectOffset()) {
983         Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
984         Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
985         Ptr = Builder.CreateBitCast(Ptr,
986                           llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
987       }
988 
989       // If the coerce-to type is a first class aggregate, we flatten it and
990       // pass the elements. Either way is semantically identical, but fast-isel
991       // and the optimizer generally likes scalar values better than FCAs.
992       if (llvm::StructType *STy =
993             dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
994         Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
995 
996         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
997           assert(AI != Fn->arg_end() && "Argument mismatch!");
998           AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
999           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1000           Builder.CreateStore(AI++, EltPtr);
1001         }
1002       } else {
1003         // Simple case, just do a coerced store of the argument into the alloca.
1004         assert(AI != Fn->arg_end() && "Argument mismatch!");
1005         AI->setName(Arg->getName() + ".coerce");
1006         CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1007       }
1008 
1009 
1010       // Match to what EmitParmDecl is expecting for this type.
1011       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1012         V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1013         if (isPromoted)
1014           V = emitArgumentDemotion(*this, Arg, V);
1015       }
1016       EmitParmDecl(*Arg, V, ArgNo);
1017       continue;  // Skip ++AI increment, already done.
1018     }
1019 
1020     case ABIArgInfo::Expand: {
1021       // If this structure was expanded into multiple arguments then
1022       // we need to create a temporary and reconstruct it from the
1023       // arguments.
1024       llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
1025       llvm::Function::arg_iterator End =
1026         ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
1027       EmitParmDecl(*Arg, Temp, ArgNo);
1028 
1029       // Name the arguments used in expansion and increment AI.
1030       unsigned Index = 0;
1031       for (; AI != End; ++AI, ++Index)
1032         AI->setName(Arg->getName() + "." + llvm::Twine(Index));
1033       continue;
1034     }
1035 
1036     case ABIArgInfo::Ignore:
1037       // Initialize the local variable appropriately.
1038       if (hasAggregateLLVMType(Ty))
1039         EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1040       else
1041         EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1042                      ArgNo);
1043 
1044       // Skip increment, no matching LLVM parameter.
1045       continue;
1046     }
1047 
1048     ++AI;
1049   }
1050   assert(AI == Fn->arg_end() && "Argument mismatch!");
1051 }
1052 
1053 /// Try to emit a fused autorelease of a return result.
1054 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1055                                                     llvm::Value *result) {
1056   // We must be immediately following the cast.
1057   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1058   if (BB->empty()) return 0;
1059   if (&BB->back() != result) return 0;
1060 
1061   llvm::Type *resultType = result->getType();
1062 
1063   // result is in a BasicBlock and is therefore an Instruction.
1064   llvm::Instruction *generator = cast<llvm::Instruction>(result);
1065 
1066   llvm::SmallVector<llvm::Instruction*,4> insnsToKill;
1067 
1068   // Look for:
1069   //  %generator = bitcast %type1* %generator2 to %type2*
1070   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1071     // We would have emitted this as a constant if the operand weren't
1072     // an Instruction.
1073     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1074 
1075     // Require the generator to be immediately followed by the cast.
1076     if (generator->getNextNode() != bitcast)
1077       return 0;
1078 
1079     insnsToKill.push_back(bitcast);
1080   }
1081 
1082   // Look for:
1083   //   %generator = call i8* @objc_retain(i8* %originalResult)
1084   // or
1085   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1086   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1087   if (!call) return 0;
1088 
1089   bool doRetainAutorelease;
1090 
1091   if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1092     doRetainAutorelease = true;
1093   } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1094                                           .objc_retainAutoreleasedReturnValue) {
1095     doRetainAutorelease = false;
1096 
1097     // Look for an inline asm immediately preceding the call and kill it, too.
1098     llvm::Instruction *prev = call->getPrevNode();
1099     if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
1100       if (asmCall->getCalledValue()
1101             == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
1102         insnsToKill.push_back(prev);
1103   } else {
1104     return 0;
1105   }
1106 
1107   result = call->getArgOperand(0);
1108   insnsToKill.push_back(call);
1109 
1110   // Keep killing bitcasts, for sanity.  Note that we no longer care
1111   // about precise ordering as long as there's exactly one use.
1112   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1113     if (!bitcast->hasOneUse()) break;
1114     insnsToKill.push_back(bitcast);
1115     result = bitcast->getOperand(0);
1116   }
1117 
1118   // Delete all the unnecessary instructions, from latest to earliest.
1119   for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
1120          i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1121     (*i)->eraseFromParent();
1122 
1123   // Do the fused retain/autorelease if we were asked to.
1124   if (doRetainAutorelease)
1125     result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1126 
1127   // Cast back to the result type.
1128   return CGF.Builder.CreateBitCast(result, resultType);
1129 }
1130 
1131 /// Emit an ARC autorelease of the result of a function.
1132 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1133                                             llvm::Value *result) {
1134   // At -O0, try to emit a fused retain/autorelease.
1135   if (CGF.shouldUseFusedARCCalls())
1136     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1137       return fused;
1138 
1139   return CGF.EmitARCAutoreleaseReturnValue(result);
1140 }
1141 
1142 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1143   // Functions with no result always return void.
1144   if (ReturnValue == 0) {
1145     Builder.CreateRetVoid();
1146     return;
1147   }
1148 
1149   llvm::DebugLoc RetDbgLoc;
1150   llvm::Value *RV = 0;
1151   QualType RetTy = FI.getReturnType();
1152   const ABIArgInfo &RetAI = FI.getReturnInfo();
1153 
1154   switch (RetAI.getKind()) {
1155   case ABIArgInfo::Indirect: {
1156     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1157     if (RetTy->isAnyComplexType()) {
1158       ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1159       StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1160     } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1161       // Do nothing; aggregates get evaluated directly into the destination.
1162     } else {
1163       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1164                         false, Alignment, RetTy);
1165     }
1166     break;
1167   }
1168 
1169   case ABIArgInfo::Extend:
1170   case ABIArgInfo::Direct:
1171     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1172         RetAI.getDirectOffset() == 0) {
1173       // The internal return value temp always will have pointer-to-return-type
1174       // type, just do a load.
1175 
1176       // If the instruction right before the insertion point is a store to the
1177       // return value, we can elide the load, zap the store, and usually zap the
1178       // alloca.
1179       llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
1180       llvm::StoreInst *SI = 0;
1181       if (InsertBB->empty() ||
1182           !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
1183           SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
1184         RV = Builder.CreateLoad(ReturnValue);
1185       } else {
1186         // Get the stored value and nuke the now-dead store.
1187         RetDbgLoc = SI->getDebugLoc();
1188         RV = SI->getValueOperand();
1189         SI->eraseFromParent();
1190 
1191         // If that was the only use of the return value, nuke it as well now.
1192         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1193           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1194           ReturnValue = 0;
1195         }
1196       }
1197     } else {
1198       llvm::Value *V = ReturnValue;
1199       // If the value is offset in memory, apply the offset now.
1200       if (unsigned Offs = RetAI.getDirectOffset()) {
1201         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1202         V = Builder.CreateConstGEP1_32(V, Offs);
1203         V = Builder.CreateBitCast(V,
1204                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1205       }
1206 
1207       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1208     }
1209 
1210     // In ARC, end functions that return a retainable type with a call
1211     // to objc_autoreleaseReturnValue.
1212     if (AutoreleaseResult) {
1213       assert(getLangOptions().ObjCAutoRefCount &&
1214              !FI.isReturnsRetained() &&
1215              RetTy->isObjCRetainableType());
1216       RV = emitAutoreleaseOfResult(*this, RV);
1217     }
1218 
1219     break;
1220 
1221   case ABIArgInfo::Ignore:
1222     break;
1223 
1224   case ABIArgInfo::Expand:
1225     assert(0 && "Invalid ABI kind for return argument");
1226   }
1227 
1228   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1229   if (!RetDbgLoc.isUnknown())
1230     Ret->setDebugLoc(RetDbgLoc);
1231 }
1232 
1233 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1234                                           const VarDecl *param) {
1235   // StartFunction converted the ABI-lowered parameter(s) into a
1236   // local alloca.  We need to turn that into an r-value suitable
1237   // for EmitCall.
1238   llvm::Value *local = GetAddrOfLocalVar(param);
1239 
1240   QualType type = param->getType();
1241 
1242   // For the most part, we just need to load the alloca, except:
1243   // 1) aggregate r-values are actually pointers to temporaries, and
1244   // 2) references to aggregates are pointers directly to the aggregate.
1245   // I don't know why references to non-aggregates are different here.
1246   if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1247     if (hasAggregateLLVMType(ref->getPointeeType()))
1248       return args.add(RValue::getAggregate(local), type);
1249 
1250     // Locals which are references to scalars are represented
1251     // with allocas holding the pointer.
1252     return args.add(RValue::get(Builder.CreateLoad(local)), type);
1253   }
1254 
1255   if (type->isAnyComplexType()) {
1256     ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1257     return args.add(RValue::getComplex(complex), type);
1258   }
1259 
1260   if (hasAggregateLLVMType(type))
1261     return args.add(RValue::getAggregate(local), type);
1262 
1263   unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1264   llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1265   return args.add(RValue::get(value), type);
1266 }
1267 
1268 static bool isProvablyNull(llvm::Value *addr) {
1269   return isa<llvm::ConstantPointerNull>(addr);
1270 }
1271 
1272 static bool isProvablyNonNull(llvm::Value *addr) {
1273   return isa<llvm::AllocaInst>(addr);
1274 }
1275 
1276 /// Emit the actual writing-back of a writeback.
1277 static void emitWriteback(CodeGenFunction &CGF,
1278                           const CallArgList::Writeback &writeback) {
1279   llvm::Value *srcAddr = writeback.Address;
1280   assert(!isProvablyNull(srcAddr) &&
1281          "shouldn't have writeback for provably null argument");
1282 
1283   llvm::BasicBlock *contBB = 0;
1284 
1285   // If the argument wasn't provably non-null, we need to null check
1286   // before doing the store.
1287   bool provablyNonNull = isProvablyNonNull(srcAddr);
1288   if (!provablyNonNull) {
1289     llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1290     contBB = CGF.createBasicBlock("icr.done");
1291 
1292     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1293     CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1294     CGF.EmitBlock(writebackBB);
1295   }
1296 
1297   // Load the value to writeback.
1298   llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1299 
1300   // Cast it back, in case we're writing an id to a Foo* or something.
1301   value = CGF.Builder.CreateBitCast(value,
1302                cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1303                             "icr.writeback-cast");
1304 
1305   // Perform the writeback.
1306   QualType srcAddrType = writeback.AddressType;
1307   CGF.EmitStoreThroughLValue(RValue::get(value),
1308                              CGF.MakeAddrLValue(srcAddr, srcAddrType));
1309 
1310   // Jump to the continuation block.
1311   if (!provablyNonNull)
1312     CGF.EmitBlock(contBB);
1313 }
1314 
1315 static void emitWritebacks(CodeGenFunction &CGF,
1316                            const CallArgList &args) {
1317   for (CallArgList::writeback_iterator
1318          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1319     emitWriteback(CGF, *i);
1320 }
1321 
1322 /// Emit an argument that's being passed call-by-writeback.  That is, we are
1323 /// passing the address of a temporary that is written back to the original l-value after the call.
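///
/// Illustrative (hypothetical) case under ARC: for a call like 'fetch(&err)',
/// where 'err' is a __strong NSError* and the parameter is declared
/// 'NSError *__autoreleasing *', the callee receives the address of a
/// temporary; the temporary may be copy-initialized from 'err' first, and its
/// final value is stored back into 'err' after the call returns.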
1324 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1325                              const ObjCIndirectCopyRestoreExpr *CRE) {
1326   llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1327 
1328   // The dest and src types don't necessarily match in LLVM terms
1329   // because of the crazy ObjC compatibility rules.
1330 
1331   llvm::PointerType *destType =
1332     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1333 
1334   // If the address is a constant null, just pass the appropriate null.
1335   if (isProvablyNull(srcAddr)) {
1336     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1337              CRE->getType());
1338     return;
1339   }
1340 
1341   QualType srcAddrType =
1342     CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1343 
1344   // Create the temporary.
1345   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1346                                            "icr.temp");
1347 
1348   // Zero-initialize it if we're not doing a copy-initialization.
1349   bool shouldCopy = CRE->shouldCopy();
1350   if (!shouldCopy) {
1351     llvm::Value *null =
1352       llvm::ConstantPointerNull::get(
1353         cast<llvm::PointerType>(destType->getElementType()));
1354     CGF.Builder.CreateStore(null, temp);
1355   }
1356 
1357   llvm::BasicBlock *contBB = 0;
1358 
1359   // If the address is *not* known to be non-null, we need a run-time select.
1360   llvm::Value *finalArgument;
1361 
1362   bool provablyNonNull = isProvablyNonNull(srcAddr);
1363   if (provablyNonNull) {
1364     finalArgument = temp;
1365   } else {
1366     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1367 
1368     finalArgument = CGF.Builder.CreateSelect(isNull,
1369                                    llvm::ConstantPointerNull::get(destType),
1370                                              temp, "icr.argument");
1371 
1372     // If we need to copy, then the load has to be conditional, which
1373     // means we need control flow.
1374     if (shouldCopy) {
1375       contBB = CGF.createBasicBlock("icr.cont");
1376       llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1377       CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1378       CGF.EmitBlock(copyBB);
1379     }
1380   }
1381 
1382   // Perform a copy if necessary.
1383   if (shouldCopy) {
1384     LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1385     RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1386     assert(srcRV.isScalar());
1387 
1388     llvm::Value *src = srcRV.getScalarVal();
1389     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1390                                     "icr.cast");
1391 
1392     // Use an ordinary store, not a store-to-lvalue.
1393     CGF.Builder.CreateStore(src, temp);
1394   }
1395 
1396   // Finish the control flow if we needed it.
1397   if (shouldCopy && !provablyNonNull)
1398     CGF.EmitBlock(contBB);
1399 
1400   args.addWriteback(srcAddr, srcAddrType, temp);
1401   args.add(RValue::get(finalArgument), CRE->getType());
1402 }
1403 
1404 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1405                                   QualType type) {
1406   if (const ObjCIndirectCopyRestoreExpr *CRE
1407         = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1408     assert(getContext().getLangOptions().ObjCAutoRefCount);
1409     assert(getContext().hasSameType(E->getType(), type));
1410     return emitWritebackArg(*this, args, CRE);
1411   }
1412 
1413   if (type->isReferenceType())
1414     return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1415                     type);
1416 
1417   if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1418       isa<ImplicitCastExpr>(E) &&
1419       cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1420     LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1421     assert(L.isSimple());
1422     args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
1423              type, /*NeedsCopy*/true);
1424     return;
1425   }
1426 
1427   args.add(EmitAnyExprToTemp(E), type);
1428 }
1429 
1430 /// Emits a call or invoke instruction to the given function, depending
1431 /// on the current state of the EH stack.
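/// (When an EH landing pad is active this emits an invoke that unwinds to it;
/// otherwise it emits a plain call.)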
1432 llvm::CallSite
1433 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1434                                   llvm::ArrayRef<llvm::Value *> Args,
1435                                   const llvm::Twine &Name) {
1436   llvm::BasicBlock *InvokeDest = getInvokeDest();
1437   if (!InvokeDest)
1438     return Builder.CreateCall(Callee, Args, Name);
1439 
1440   llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1441   llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
1442                                                   Args, Name);
1443   EmitBlock(ContBB);
1444   return Invoke;
1445 }
1446 
1447 llvm::CallSite
1448 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1449                                   const llvm::Twine &Name) {
1450   return EmitCallOrInvoke(Callee, llvm::ArrayRef<llvm::Value *>(), Name);
1451 }
1452 
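/// Check (via assert) that the IR value about to be passed matches the
/// parameter type the callee's IR function type expects at this position, or
/// that the callee is varargs, and advance the IR argument index.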
1453 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1454                             llvm::FunctionType *FTy) {
1455   if (ArgNo < FTy->getNumParams())
1456     assert(Elt->getType() == FTy->getParamType(ArgNo));
1457   else
1458     assert(FTy->isVarArg());
1459   ++ArgNo;
1460 }
1461 
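/// Expand an argument with ABIArgInfo::Expand: recursively walk the fields of
/// the structure and push one IR argument per scalar field, bitcasting where a
/// field's IR type does not exactly match the expected parameter type.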
1462 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1463                                        llvm::SmallVector<llvm::Value*,16> &Args,
1464                                        llvm::FunctionType *IRFuncTy) {
1465   const RecordType *RT = Ty->getAsStructureType();
1466   assert(RT && "Can only expand structure types.");
1467 
1468   RecordDecl *RD = RT->getDecl();
1469   assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1470   llvm::Value *Addr = RV.getAggregateAddr();
1471   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1472        i != e; ++i) {
1473     FieldDecl *FD = *i;
1474     QualType FT = FD->getType();
1475 
1476     // FIXME: What are the right qualifiers here?
1477     LValue LV = EmitLValueForField(Addr, FD, 0);
1478     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
1479       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()),
1480                        Args, IRFuncTy);
1481       continue;
1482     }
1483 
1484     RValue RV = EmitLoadOfLValue(LV);
1485     assert(RV.isScalar() &&
1486            "Unexpected non-scalar rvalue during struct expansion.");
1487 
1488     // Insert a bitcast as needed.
1489     llvm::Value *V = RV.getScalarVal();
1490     if (Args.size() < IRFuncTy->getNumParams() &&
1491         V->getType() != IRFuncTy->getParamType(Args.size()))
1492       V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1493 
1494     Args.push_back(V);
1495   }
1496 }
1497 
1498 
1499 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1500                                  llvm::Value *Callee,
1501                                  ReturnValueSlot ReturnValue,
1502                                  const CallArgList &CallArgs,
1503                                  const Decl *TargetDecl,
1504                                  llvm::Instruction **callOrInvoke) {
1505   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1506   llvm::SmallVector<llvm::Value*, 16> Args;
1507 
1508   // Handle struct-return functions by passing a pointer to the
1509   // location that we would like to return into.
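  // (Illustrative sketch, assuming a typical target: a function written as
  //    struct S f(void);
  //  may be lowered to
  //    define void @f(%struct.S* sret %agg.result)
  //  with the caller passing a temporary as that hidden first argument.)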
1510   QualType RetTy = CallInfo.getReturnType();
1511   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1512 
1513   // IRArgNo - Keep track of the argument number in the callee we're looking at.
1514   unsigned IRArgNo = 0;
1515   llvm::FunctionType *IRFuncTy =
1516     cast<llvm::FunctionType>(
1517                   cast<llvm::PointerType>(Callee->getType())->getElementType());
1518 
1519   // If the call returns a temporary with struct return, create a temporary
1520   // alloca to hold the result, unless one is given to us.
1521   if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1522     llvm::Value *Value = ReturnValue.getValue();
1523     if (!Value)
1524       Value = CreateMemTemp(RetTy);
1525     Args.push_back(Value);
1526     checkArgMatches(Value, IRArgNo, IRFuncTy);
1527   }
1528 
1529   assert(CallInfo.arg_size() == CallArgs.size() &&
1530          "Mismatch between function signature & arguments.");
1531   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1532   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1533        I != E; ++I, ++info_it) {
1534     const ABIArgInfo &ArgInfo = info_it->info;
1535     RValue RV = I->RV;
1536 
1537     unsigned TypeAlign =
1538       getContext().getTypeAlignInChars(I->Ty).getQuantity();
1539     switch (ArgInfo.getKind()) {
1540     case ABIArgInfo::Indirect: {
1541       if (RV.isScalar() || RV.isComplex()) {
1542         // Make a temporary alloca to pass the argument.
1543         llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1544         if (ArgInfo.getIndirectAlign() > AI->getAlignment())
1545           AI->setAlignment(ArgInfo.getIndirectAlign());
1546         Args.push_back(AI);
1547 
1548         if (RV.isScalar())
1549           EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
1550                             TypeAlign, I->Ty);
1551         else
1552           StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1553 
1554         // Validate argument match.
1555         checkArgMatches(AI, IRArgNo, IRFuncTy);
1556       } else {
1557         // We want to avoid creating an unnecessary temporary+copy here;
1558         // however, we need one in two cases:
1559         // 1. If the argument is not byval, and we are required to copy the
1560         //    source.  (This case doesn't occur on any common architecture.)
1561         // 2. If the argument is byval, RV is not sufficiently aligned, and
1562         //    we cannot force it to be sufficiently aligned.
1563         llvm::Value *Addr = RV.getAggregateAddr();
1564         unsigned Align = ArgInfo.getIndirectAlign();
1565         const llvm::TargetData *TD = &CGM.getTargetData();
1566         if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
1567             (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
1568              llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
1569           // Create an aligned temporary, and copy to it.
1570           llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1571           if (Align > AI->getAlignment())
1572             AI->setAlignment(Align);
1573           Args.push_back(AI);
1574           EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
1575 
1576           // Validate argument match.
1577           checkArgMatches(AI, IRArgNo, IRFuncTy);
1578         } else {
1579           // Skip the extra memcpy call.
1580           Args.push_back(Addr);
1581 
1582           // Validate argument match.
1583           checkArgMatches(Addr, IRArgNo, IRFuncTy);
1584         }
1585       }
1586       break;
1587     }
1588 
1589     case ABIArgInfo::Ignore:
1590       break;
1591 
1592     case ABIArgInfo::Extend:
1593     case ABIArgInfo::Direct: {
1594       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
1595           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
1596           ArgInfo.getDirectOffset() == 0) {
1597         llvm::Value *V;
1598         if (RV.isScalar())
1599           V = RV.getScalarVal();
1600         else
1601           V = Builder.CreateLoad(RV.getAggregateAddr());
1602 
1603         // If the argument doesn't match, perform a bitcast to coerce it.  This
1604         // can happen due to trivial type mismatches.
1605         if (IRArgNo < IRFuncTy->getNumParams() &&
1606             V->getType() != IRFuncTy->getParamType(IRArgNo))
1607           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
1608         Args.push_back(V);
1609 
1610         checkArgMatches(V, IRArgNo, IRFuncTy);
1611         break;
1612       }
1613 
1614       // FIXME: Avoid the conversion through memory if possible.
1615       llvm::Value *SrcPtr;
1616       if (RV.isScalar()) {
1617         SrcPtr = CreateMemTemp(I->Ty, "coerce");
1618         EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
1619       } else if (RV.isComplex()) {
1620         SrcPtr = CreateMemTemp(I->Ty, "coerce");
1621         StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1622       } else
1623         SrcPtr = RV.getAggregateAddr();
1624 
1625       // If the value is offset in memory, apply the offset now.
1626       if (unsigned Offs = ArgInfo.getDirectOffset()) {
1627         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
1628         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
1629         SrcPtr = Builder.CreateBitCast(SrcPtr,
1630                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
1631 
1632       }
1633 
1634       // If the coerce-to type is a first class aggregate, we flatten it and
1635       // pass the elements. Either way is semantically identical, but fast-isel
1636       // and the optimizer generally like scalar values better than FCAs.
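      // (Illustrative sketch: with a coerce-to type of { i64, double }, two
      // separate IR arguments -- an i64 and a double, each loaded from a GEP
      // into the coerced pointer -- are passed instead of one FCA value.)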
1637       if (llvm::StructType *STy =
1638             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
1639         SrcPtr = Builder.CreateBitCast(SrcPtr,
1640                                        llvm::PointerType::getUnqual(STy));
1641         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1642           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
1643           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
1644           // We don't know what we're loading from.
1645           LI->setAlignment(1);
1646           Args.push_back(LI);
1647 
1648           // Validate argument match.
1649           checkArgMatches(LI, IRArgNo, IRFuncTy);
1650         }
1651       } else {
1652         // In the simple case, just pass the coerced loaded value.
1653         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
1654                                          *this));
1655 
1656         // Validate argument match.
1657         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
1658       }
1659 
1660       break;
1661     }
1662 
1663     case ABIArgInfo::Expand:
1664       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
1665       IRArgNo = Args.size();
1666       break;
1667     }
1668   }
1669 
1670   // If the callee is a function bitcast to a pointer to a varargs function
1671   // type, check whether we can remove the bitcast.  This handles some cases
1672   // with unprototyped functions.
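  // (Illustrative sketch, C source:  void f();  ...  f(1);  -- the call site
  // sees an unprototyped/varargs type, so Callee is a bitcast of @f; if the
  // argument types line up we can call @f directly.)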
1673   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
1674     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
1675       llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
1676       llvm::FunctionType *CurFT =
1677         cast<llvm::FunctionType>(CurPT->getElementType());
1678       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
1679 
1680       if (CE->getOpcode() == llvm::Instruction::BitCast &&
1681           ActualFT->getReturnType() == CurFT->getReturnType() &&
1682           ActualFT->getNumParams() == CurFT->getNumParams() &&
1683           ActualFT->getNumParams() == Args.size() &&
1684           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
1685         bool ArgsMatch = true;
1686         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
1687           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
1688             ArgsMatch = false;
1689             break;
1690           }
1691 
1692         // Strip the cast if we can get away with it.  This is a nice cleanup,
1693         // but also allows us to inline the function at -O0 if it is marked
1694         // always_inline.
1695         if (ArgsMatch)
1696           Callee = CalleeF;
1697       }
1698     }
1699 
1700   unsigned CallingConv;
1701   CodeGen::AttributeListType AttributeList;
1702   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
1703   llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
1704                                                    AttributeList.end());
1705 
1706   llvm::BasicBlock *InvokeDest = 0;
1707   if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
1708     InvokeDest = getInvokeDest();
1709 
1710   llvm::CallSite CS;
1711   if (!InvokeDest) {
1712     CS = Builder.CreateCall(Callee, Args);
1713   } else {
1714     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1715     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
1716     EmitBlock(Cont);
1717   }
1718   if (callOrInvoke)
1719     *callOrInvoke = CS.getInstruction();
1720 
1721   CS.setAttributes(Attrs);
1722   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1723 
1724   // If the call doesn't return, finish the basic block and clear the
1725   // insertion point; this allows the rest of IRgen to discard
1726   // unreachable code.
1727   if (CS.doesNotReturn()) {
1728     Builder.CreateUnreachable();
1729     Builder.ClearInsertionPoint();
1730 
1731     // FIXME: For now, emit a dummy basic block because expr emitters in
1732     // general are not ready to handle emitting expressions at unreachable
1733     // points.
1734     EnsureInsertPoint();
1735 
1736     // Return a reasonable RValue.
1737     return GetUndefRValue(RetTy);
1738   }
1739 
1740   llvm::Instruction *CI = CS.getInstruction();
1741   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
1742     CI->setName("call");
1743 
1744   // Emit any writebacks immediately.  Arguably this should happen
1745   // after any return-value munging.
1746   if (CallArgs.hasWritebacks())
1747     emitWritebacks(*this, CallArgs);
1748 
1749   switch (RetAI.getKind()) {
1750   case ABIArgInfo::Indirect: {
1751     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1752     if (RetTy->isAnyComplexType())
1753       return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
1754     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1755       return RValue::getAggregate(Args[0]);
1756     return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
1757   }
1758 
1759   case ABIArgInfo::Ignore:
1760     // If the return value is being ignored (e.g. void or an empty record),
1761     // still construct an appropriate return value for our caller.
1762     return GetUndefRValue(RetTy);
1763 
1764   case ABIArgInfo::Extend:
1765   case ABIArgInfo::Direct: {
1766     llvm::Type *RetIRTy = ConvertType(RetTy);
1767     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
1768       if (RetTy->isAnyComplexType()) {
1769         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
1770         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
1771         return RValue::getComplex(std::make_pair(Real, Imag));
1772       }
1773       if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1774         llvm::Value *DestPtr = ReturnValue.getValue();
1775         bool DestIsVolatile = ReturnValue.isVolatile();
1776 
1777         if (!DestPtr) {
1778           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
1779           DestIsVolatile = false;
1780         }
1781         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
1782         return RValue::getAggregate(DestPtr);
1783       }
1784 
1785       // If the argument doesn't match, perform a bitcast to coerce it.  This
1786       // can happen due to trivial type mismatches.
1787       llvm::Value *V = CI;
1788       if (V->getType() != RetIRTy)
1789         V = Builder.CreateBitCast(V, RetIRTy);
1790       return RValue::get(V);
1791     }
1792 
1793     llvm::Value *DestPtr = ReturnValue.getValue();
1794     bool DestIsVolatile = ReturnValue.isVolatile();
1795 
1796     if (!DestPtr) {
1797       DestPtr = CreateMemTemp(RetTy, "coerce");
1798       DestIsVolatile = false;
1799     }
1800 
1801     // If the value is offset in memory, apply the offset now.
1802     llvm::Value *StorePtr = DestPtr;
1803     if (unsigned Offs = RetAI.getDirectOffset()) {
1804       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
1805       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
1806       StorePtr = Builder.CreateBitCast(StorePtr,
1807                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1808     }
1809     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
1810 
1811     unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1812     if (RetTy->isAnyComplexType())
1813       return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
1814     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
1815       return RValue::getAggregate(DestPtr);
1816     return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
1817   }
1818 
1819   case ABIArgInfo::Expand:
1820     assert(0 && "Invalid ABI kind for return argument");
1821   }
1822 
1823   assert(0 && "Unhandled ABIArgInfo::Kind");
1824   return RValue::get(0);
1825 }
1826 
1827 /* VarArg handling */
1828 
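/// Emit a va_arg load of the given type from the given va_list address,
/// delegating the target-specific lowering to the ABIInfo.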
1829 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
1830   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
1831 }
1832