1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/InlineAsm.h"
30 #include "llvm/MC/SubtargetFeature.h"
31 #include "llvm/Support/CallSite.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 using namespace clang;
34 using namespace CodeGen;
35
36 /***/
37
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
39 switch (CC) {
40 default: return llvm::CallingConv::C;
41 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
42 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
43 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
44 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
45 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
46 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
47 // TODO: add support for CC_X86Pascal to llvm
48 }
49 }
50
51 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
52 /// qualification.
53 /// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
55 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
56 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
57 }
58
59 /// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
61 return MD->getType()->getCanonicalTypeUnqualified()
62 .getAs<FunctionProtoType>();
63 }
64
65 /// Returns the "extra-canonicalized" return type, which discards
66 /// qualifiers on the return type. Codegen doesn't care about them,
67 /// and it makes ABI code a little easier to be able to assume that
68 /// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
70 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
71 }
72
73 /// Arrange the argument and result information for a value of the given
74 /// unprototyped freestanding function type.
75 const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
77 // When translating an unprototyped function type, always use a
78 // variadic type.
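  // For example, a call through 'int (*fp)()' is lowered as a call to a
  // varargs function with zero required arguments, so every actual argument
  // is passed using the variadic convention.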
79 return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
80 None, FTNP->getExtInfo(), RequiredArgs(0));
81 }
82
83 /// Arrange the LLVM function layout for a value of the given function
84 /// type, on top of any implicit parameters already stored. Use the
85 /// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
87 SmallVectorImpl<CanQualType> &prefix,
88 CanQual<FunctionProtoType> FTP,
89 FunctionType::ExtInfo extInfo) {
90 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
91 // FIXME: Kill copy.
92 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
93 prefix.push_back(FTP->getArgType(i));
94 CanQualType resultType = FTP->getResultType().getUnqualifiedType();
95 return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
96 }
97
98 /// Arrange the argument and result information for a free function (i.e.
99 /// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
101 SmallVectorImpl<CanQualType> &prefix,
102 CanQual<FunctionProtoType> FTP) {
103 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
104 }
105
106 /// Given the formal ext-info of a C++ instance method, adjust it
107 /// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
109 FunctionType::ExtInfo &extInfo,
110 bool isVariadic) {
111 if (extInfo.getCC() == CC_Default) {
112 CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
113 extInfo = extInfo.withCallingConv(CC);
114 }
115 }
116
/// Arrange the argument and result information for a C++ instance method of
/// the given type, adjusting the ext-info according to the C++ ABI in effect.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
120 SmallVectorImpl<CanQualType> &prefix,
121 CanQual<FunctionProtoType> FTP) {
122 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
123 adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
124 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
125 }
126
127 /// Arrange the argument and result information for a value of the
128 /// given freestanding function type.
129 const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
131 SmallVector<CanQualType, 16> argTypes;
132 return ::arrangeFreeFunctionType(*this, argTypes, FTP);
133 }
134
static CallingConv getCallingConventionForDecl(const Decl *D) {
136 // Set the appropriate calling convention for the Function.
137 if (D->hasAttr<StdCallAttr>())
138 return CC_X86StdCall;
139
140 if (D->hasAttr<FastCallAttr>())
141 return CC_X86FastCall;
142
143 if (D->hasAttr<ThisCallAttr>())
144 return CC_X86ThisCall;
145
146 if (D->hasAttr<PascalAttr>())
147 return CC_X86Pascal;
148
149 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
150 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
151
152 if (D->hasAttr<PnaclCallAttr>())
153 return CC_PnaclCall;
154
155 if (D->hasAttr<IntelOclBiccAttr>())
156 return CC_IntelOclBicc;
157
158 return CC_C;
159 }
160
161 /// Arrange the argument and result information for a call to an
162 /// unknown C++ non-static member function of the given abstract type.
163 /// The member function must be an ordinary function, i.e. not a
164 /// constructor or destructor.
165 const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
167 const FunctionProtoType *FTP) {
168 SmallVector<CanQualType, 16> argTypes;
169
170 // Add the 'this' pointer.
171 argTypes.push_back(GetThisType(Context, RD));
172
173 return ::arrangeCXXMethodType(*this, argTypes,
174 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
175 }
176
177 /// Arrange the argument and result information for a declaration or
178 /// definition of the given C++ non-static member function. The
179 /// member function must be an ordinary function, i.e. not a
180 /// constructor or destructor.
181 const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
184 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
185
186 CanQual<FunctionProtoType> prototype = GetFormalType(MD);
187
188 if (MD->isInstance()) {
189 // The abstract case is perfectly fine.
190 return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
191 }
192
193 return arrangeFreeFunctionType(prototype);
194 }
195
196 /// Arrange the argument and result information for a declaration
197 /// or definition to the given constructor variant.
198 const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
200 CXXCtorType ctorKind) {
201 SmallVector<CanQualType, 16> argTypes;
202 argTypes.push_back(GetThisType(Context, D->getParent()));
203
204 GlobalDecl GD(D, ctorKind);
205 CanQualType resultType =
206 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
207
208 TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
209
210 CanQual<FunctionProtoType> FTP = GetFormalType(D);
211
212 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
213
214 // Add the formal parameters.
215 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
216 argTypes.push_back(FTP->getArgType(i));
217
218 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
219 adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
220 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
221 }
222
223 /// Arrange the argument and result information for a declaration,
224 /// definition, or call to the given destructor variant. It so
225 /// happens that all three cases produce the same information.
226 const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
228 CXXDtorType dtorKind) {
229 SmallVector<CanQualType, 2> argTypes;
230 argTypes.push_back(GetThisType(Context, D->getParent()));
231
232 GlobalDecl GD(D, dtorKind);
233 CanQualType resultType =
234 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
235
236 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
237
238 CanQual<FunctionProtoType> FTP = GetFormalType(D);
239 assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(FTP->isVariadic() == 0 && "variadic dtor");
241
242 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
243 adjustCXXMethodInfo(*this, extInfo, false);
244 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
245 RequiredArgs::All);
246 }
247
248 /// Arrange the argument and result information for the declaration or
249 /// definition of the given function.
250 const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
252 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
253 if (MD->isInstance())
254 return arrangeCXXMethodDeclaration(MD);
255
256 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
257
258 assert(isa<FunctionType>(FTy));
259
260 // When declaring a function without a prototype, always use a
261 // non-variadic type.
262 if (isa<FunctionNoProtoType>(FTy)) {
263 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
264 return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
265 noProto->getExtInfo(), RequiredArgs::All);
266 }
267
268 assert(isa<FunctionProtoType>(FTy));
269 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
270 }
271
272 /// Arrange the argument and result information for the declaration or
273 /// definition of an Objective-C method.
274 const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
276 // It happens that this is the same as a call with no optional
277 // arguments, except also using the formal 'self' type.
278 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
279 }
280
281 /// Arrange the argument and result information for the function type
282 /// through which to perform a send to the given Objective-C method,
283 /// using the given receiver type. The receiver type is not always
284 /// the 'self' type of the method or even an Objective-C pointer type.
285 /// This is *not* the right method for actually performing such a
286 /// message send, due to the possibility of optional arguments.
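///
/// For variadic methods, only the receiver, the selector, and the declared
/// parameters are counted as required arguments.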
287 const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
289 QualType receiverType) {
290 SmallVector<CanQualType, 16> argTys;
291 argTys.push_back(Context.getCanonicalParamType(receiverType));
292 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
293 // FIXME: Kill copy?
294 for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
295 e = MD->param_end(); i != e; ++i) {
296 argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
297 }
298
299 FunctionType::ExtInfo einfo;
300 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
301
302 if (getContext().getLangOpts().ObjCAutoRefCount &&
303 MD->hasAttr<NSReturnsRetainedAttr>())
304 einfo = einfo.withProducesResult(true);
305
306 RequiredArgs required =
307 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
308
309 return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
310 einfo, required);
311 }
312
313 const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
315 // FIXME: Do we need to handle ObjCMethodDecl?
316 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
317
318 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
319 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
320
321 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
322 return arrangeCXXDestructor(DD, GD.getDtorType());
323
324 return arrangeFunctionDeclaration(FD);
325 }
326
327 /// Arrange a call as unto a free function, except possibly with an
328 /// additional number of formal parameters considered required.
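/// For example, block invocations are arranged through here with one extra
/// required argument for the implicit block-literal parameter.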
329 static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
331 const CallArgList &args,
332 const FunctionType *fnType,
333 unsigned numExtraRequiredArgs) {
334 assert(args.size() >= numExtraRequiredArgs);
335
336 // In most cases, there are no optional arguments.
337 RequiredArgs required = RequiredArgs::All;
338
339 // If we have a variadic prototype, the required arguments are the
340 // extra prefix plus the arguments in the prototype.
341 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
342 if (proto->isVariadic())
343 required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
344
345 // If we don't have a prototype at all, but we're supposed to
346 // explicitly use the variadic convention for unprototyped calls,
347 // treat all of the arguments as required but preserve the nominal
348 // possibility of variadics.
349 } else if (CGT.CGM.getTargetCodeGenInfo()
350 .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
351 required = RequiredArgs(args.size());
352 }
353
354 return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
355 fnType->getExtInfo(), required);
356 }
357
358 /// Figure out the rules for calling a function with the given formal
359 /// type using the given arguments. The arguments are necessary
360 /// because the function might be unprototyped, in which case it's
361 /// target-dependent in crazy ways.
362 const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
364 const FunctionType *fnType) {
365 return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
366 }
367
368 /// A block function call is essentially a free-function call with an
369 /// extra implicit argument.
370 const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
372 const FunctionType *fnType) {
373 return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
374 }
375
376 const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
378 const CallArgList &args,
379 FunctionType::ExtInfo info,
380 RequiredArgs required) {
381 // FIXME: Kill copy.
382 SmallVector<CanQualType, 16> argTypes;
383 for (CallArgList::const_iterator i = args.begin(), e = args.end();
384 i != e; ++i)
385 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
386 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
387 required);
388 }
389
390 /// Arrange a call to a C++ method, passing the given arguments.
391 const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
393 const FunctionProtoType *FPT,
394 RequiredArgs required) {
395 // FIXME: Kill copy.
396 SmallVector<CanQualType, 16> argTypes;
397 for (CallArgList::const_iterator i = args.begin(), e = args.end();
398 i != e; ++i)
399 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
400
401 FunctionType::ExtInfo info = FPT->getExtInfo();
402 adjustCXXMethodInfo(*this, info, FPT->isVariadic());
403 return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
404 argTypes, info, required);
405 }
406
407 const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
409 const FunctionArgList &args,
410 const FunctionType::ExtInfo &info,
411 bool isVariadic) {
412 // FIXME: Kill copy.
413 SmallVector<CanQualType, 16> argTypes;
414 for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
415 i != e; ++i)
416 argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
417
418 RequiredArgs required =
419 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
420 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
421 required);
422 }
423
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
425 return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
426 FunctionType::ExtInfo(), RequiredArgs::All);
427 }
428
429 /// Arrange the argument and result information for an abstract value
430 /// of a given function type. This is the method which all of the
431 /// above functions ultimately defer to.
432 const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
434 ArrayRef<CanQualType> argTypes,
435 FunctionType::ExtInfo info,
436 RequiredArgs required) {
437 #ifndef NDEBUG
438 for (ArrayRef<CanQualType>::const_iterator
439 I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
440 assert(I->isCanonicalAsParam());
441 #endif
442
443 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
444
445 // Lookup or create unique function info.
446 llvm::FoldingSetNodeID ID;
447 CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
448
449 void *insertPos = 0;
450 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
451 if (FI)
452 return *FI;
453
454 // Construct the function info. We co-allocate the ArgInfos.
455 FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
456 FunctionInfos.InsertNode(FI, insertPos);
457
458 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
459 assert(inserted && "Recursively being processed?");
460
461 // Compute ABI information.
462 getABIInfo().computeInfo(*FI);
463
464 // Loop over all of the computed argument and return value info. If any of
465 // them are direct or extend without a specified coerce type, specify the
466 // default now.
467 ABIArgInfo &retInfo = FI->getReturnInfo();
468 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
469 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
470
471 for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
472 I != E; ++I)
473 if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
474 I->info.setCoerceToType(ConvertType(I->type));
475
476 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
477 assert(erased && "Not in set?");
478
479 return *FI;
480 }
481
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
483 const FunctionType::ExtInfo &info,
484 CanQualType resultType,
485 ArrayRef<CanQualType> argTypes,
486 RequiredArgs required) {
487 void *buffer = operator new(sizeof(CGFunctionInfo) +
488 sizeof(ArgInfo) * (argTypes.size() + 1));
489 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
490 FI->CallingConvention = llvmCC;
491 FI->EffectiveCallingConvention = llvmCC;
492 FI->ASTCallingConvention = info.getCC();
493 FI->NoReturn = info.getNoReturn();
494 FI->ReturnsRetained = info.getProducesResult();
495 FI->Required = required;
496 FI->HasRegParm = info.getHasRegParm();
497 FI->RegParm = info.getRegParm();
498 FI->NumArgs = argTypes.size();
499 FI->getArgsBuffer()[0].type = resultType;
500 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
501 FI->getArgsBuffer()[i + 1].type = argTypes[i];
502 return FI;
503 }
504
505 /***/
506
void CodeGenTypes::GetExpandedTypes(QualType type,
508 SmallVectorImpl<llvm::Type*> &expandedTypes) {
509 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
510 uint64_t NumElts = AT->getSize().getZExtValue();
511 for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
512 GetExpandedTypes(AT->getElementType(), expandedTypes);
513 } else if (const RecordType *RT = type->getAs<RecordType>()) {
514 const RecordDecl *RD = RT->getDecl();
515 assert(!RD->hasFlexibleArrayMember() &&
516 "Cannot expand structure with flexible array.");
517 if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases: all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
520 const FieldDecl *LargestFD = 0;
521 CharUnits UnionSize = CharUnits::Zero();
522
523 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
524 i != e; ++i) {
525 const FieldDecl *FD = *i;
526 assert(!FD->isBitField() &&
527 "Cannot expand structure with bit-field members.");
528 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
529 if (UnionSize < FieldSize) {
530 UnionSize = FieldSize;
531 LargestFD = FD;
532 }
533 }
534 if (LargestFD)
535 GetExpandedTypes(LargestFD->getType(), expandedTypes);
536 } else {
537 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
538 i != e; ++i) {
539 assert(!i->isBitField() &&
540 "Cannot expand structure with bit-field members.");
541 GetExpandedTypes(i->getType(), expandedTypes);
542 }
543 }
544 } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
545 llvm::Type *EltTy = ConvertType(CT->getElementType());
546 expandedTypes.push_back(EltTy);
547 expandedTypes.push_back(EltTy);
548 } else
549 expandedTypes.push_back(ConvertType(type));
550 }
551
552 llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
554 llvm::Function::arg_iterator AI) {
555 assert(LV.isSimple() &&
556 "Unexpected non-simple lvalue during struct expansion.");
557
558 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
559 unsigned NumElts = AT->getSize().getZExtValue();
560 QualType EltTy = AT->getElementType();
561 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
562 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
563 LValue LV = MakeAddrLValue(EltAddr, EltTy);
564 AI = ExpandTypeFromArgs(EltTy, LV, AI);
565 }
566 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
567 RecordDecl *RD = RT->getDecl();
568 if (RD->isUnion()) {
      // Unions can appear here only in degenerate cases: all the fields are
      // the same after flattening. Thus we have to use the "largest" field.
571 const FieldDecl *LargestFD = 0;
572 CharUnits UnionSize = CharUnits::Zero();
573
574 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
575 i != e; ++i) {
576 const FieldDecl *FD = *i;
577 assert(!FD->isBitField() &&
578 "Cannot expand structure with bit-field members.");
579 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
580 if (UnionSize < FieldSize) {
581 UnionSize = FieldSize;
582 LargestFD = FD;
583 }
584 }
585 if (LargestFD) {
586 // FIXME: What are the right qualifiers here?
587 LValue SubLV = EmitLValueForField(LV, LargestFD);
588 AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
589 }
590 } else {
591 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
592 i != e; ++i) {
593 FieldDecl *FD = *i;
594 QualType FT = FD->getType();
595
596 // FIXME: What are the right qualifiers here?
597 LValue SubLV = EmitLValueForField(LV, FD);
598 AI = ExpandTypeFromArgs(FT, SubLV, AI);
599 }
600 }
601 } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
602 QualType EltTy = CT->getElementType();
603 llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
604 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
605 llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
606 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
607 } else {
608 EmitStoreThroughLValue(RValue::get(AI), LV);
609 ++AI;
610 }
611
612 return AI;
613 }
614
615 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
616 /// accessing some number of bytes out of it, try to gep into the struct to get
617 /// at its inner goodness. Dive as deep as possible without entering an element
618 /// with an in-memory size smaller than DstSize.
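///
/// For example, given a pointer to { { i32 }, float } and DstSize == 4, this
/// dives down and returns a pointer to the innermost i32.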
619 static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
621 llvm::StructType *SrcSTy,
622 uint64_t DstSize, CodeGenFunction &CGF) {
623 // We can't dive into a zero-element struct.
624 if (SrcSTy->getNumElements() == 0) return SrcPtr;
625
626 llvm::Type *FirstElt = SrcSTy->getElementType(0);
627
628 // If the first elt is at least as large as what we're looking for, or if the
629 // first element is the same size as the whole struct, we can enter it.
630 uint64_t FirstEltSize =
631 CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
632 if (FirstEltSize < DstSize &&
633 FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
634 return SrcPtr;
635
636 // GEP into the first element.
637 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
638
639 // If the first element is a struct, recurse.
640 llvm::Type *SrcTy =
641 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
642 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
643 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
644
645 return SrcPtr;
646 }
647
648 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
649 /// are either integers or pointers. This does a truncation of the value if it
650 /// is too large or a zero extension if it is too small.
651 ///
652 /// This behaves as if the value were coerced through memory, so on big-endian
653 /// targets the high bits are preserved in a truncation, while little-endian
654 /// targets preserve the low bits.
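///
/// For example, coercing an i64 to an i32 keeps the low 32 bits on a
/// little-endian target but the high 32 bits on a big-endian target,
/// matching what a store followed by a narrower load would produce.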
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
656 llvm::Type *Ty,
657 CodeGenFunction &CGF) {
658 if (Val->getType() == Ty)
659 return Val;
660
661 if (isa<llvm::PointerType>(Val->getType())) {
662 // If this is Pointer->Pointer avoid conversion to and from int.
663 if (isa<llvm::PointerType>(Ty))
664 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
665
666 // Convert the pointer to an integer so we can play with its width.
667 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
668 }
669
670 llvm::Type *DestIntTy = Ty;
671 if (isa<llvm::PointerType>(DestIntTy))
672 DestIntTy = CGF.IntPtrTy;
673
674 if (Val->getType() != DestIntTy) {
675 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
676 if (DL.isBigEndian()) {
677 // Preserve the high bits on big-endian targets.
678 // That is what memory coercion does.
679 uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
680 uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
681 if (SrcSize > DstSize) {
682 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
683 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
684 } else {
685 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
686 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
687 }
688 } else {
689 // Little-endian targets preserve the low bits. No shifts required.
690 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
691 }
692 }
693
694 if (isa<llvm::PointerType>(Ty))
695 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
696 return Val;
697 }
698
699
700
701 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
702 /// a pointer to an object of type \arg Ty.
703 ///
704 /// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
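///
/// For example, an argument of type { i32, i32 } coerced to i64 is loaded by
/// bitcasting the source pointer when the source is at least as large as the
/// destination, and by copying through a temporary alloca otherwise.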
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
708 llvm::Type *Ty,
709 CodeGenFunction &CGF) {
710 llvm::Type *SrcTy =
711 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
712
713 // If SrcTy and Ty are the same, just do a load.
714 if (SrcTy == Ty)
715 return CGF.Builder.CreateLoad(SrcPtr);
716
717 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
718
719 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
720 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
721 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
722 }
723
724 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
725
726 // If the source and destination are integer or pointer types, just do an
727 // extension or truncation to the desired type.
728 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
729 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
730 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
731 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
732 }
733
734 // If load is legal, just bitcast the src pointer.
735 if (SrcSize >= DstSize) {
736 // Generally SrcSize is never greater than DstSize, since this means we are
737 // losing bits. However, this can happen in cases where the structure has
738 // additional padding, for example due to a user specified alignment.
739 //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
742 llvm::Value *Casted =
743 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
744 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
745 // FIXME: Use better alignment / avoid requiring aligned load.
746 Load->setAlignment(1);
747 return Load;
748 }
749
750 // Otherwise do coercion through memory. This is stupid, but
751 // simple.
752 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
753 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
754 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
755 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
756 // FIXME: Use better alignment.
757 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
758 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
759 1, false);
760 return CGF.Builder.CreateLoad(Tmp);
761 }
762
763 // Function to store a first-class aggregate into memory. We prefer to
764 // store the elements rather than the aggregate to be more friendly to
765 // fast-isel.
766 // FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
768 llvm::Value *DestPtr, bool DestIsVolatile,
769 bool LowAlignment) {
770 // Prefer scalar stores to first-class aggregate stores.
771 if (llvm::StructType *STy =
772 dyn_cast<llvm::StructType>(Val->getType())) {
773 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
774 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
775 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
776 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
777 DestIsVolatile);
778 if (LowAlignment)
779 SI->setAlignment(1);
780 }
781 } else {
782 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
783 if (LowAlignment)
784 SI->setAlignment(1);
785 }
786 }
787
788 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
789 /// where the source and destination may have different types.
790 ///
791 /// This safely handles the case when the src type is larger than the
792 /// destination type; the upper bits of the src will be lost.
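///
/// For example, storing an { i32, i32 } coerced value into an i64-sized slot
/// bitcasts the destination pointer and lets BuildAggStore split the
/// aggregate into two scalar i32 stores.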
static void CreateCoercedStore(llvm::Value *Src,
794 llvm::Value *DstPtr,
795 bool DstIsVolatile,
796 CodeGenFunction &CGF) {
797 llvm::Type *SrcTy = Src->getType();
798 llvm::Type *DstTy =
799 cast<llvm::PointerType>(DstPtr->getType())->getElementType();
800 if (SrcTy == DstTy) {
801 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
802 return;
803 }
804
805 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
806
807 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
808 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
809 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
810 }
811
812 // If the source and destination are integer or pointer types, just do an
813 // extension or truncation to the desired type.
814 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
815 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
816 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
817 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
818 return;
819 }
820
821 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
822
823 // If store is legal, just bitcast the src pointer.
824 if (SrcSize <= DstSize) {
825 llvm::Value *Casted =
826 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
827 // FIXME: Use better alignment / avoid requiring aligned store.
828 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
829 } else {
830 // Otherwise do coercion through memory. This is stupid, but
831 // simple.
832
833 // Generally SrcSize is never greater than DstSize, since this means we are
834 // losing bits. However, this can happen in cases where the structure has
835 // additional padding, for example due to a user specified alignment.
836 //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
839 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
840 CGF.Builder.CreateStore(Src, Tmp);
841 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
842 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
843 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
844 // FIXME: Use better alignment.
845 CGF.Builder.CreateMemCpy(DstCasted, Casted,
846 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
847 1, false);
848 }
849 }
850
851 /***/
852
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
854 return FI.getReturnInfo().isIndirect();
855 }
856
bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
858 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
859 switch (BT->getKind()) {
860 default:
861 return false;
862 case BuiltinType::Float:
863 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
864 case BuiltinType::Double:
865 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
866 case BuiltinType::LongDouble:
867 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
868 }
869 }
870
871 return false;
872 }
873
bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
875 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
876 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
877 if (BT->getKind() == BuiltinType::LongDouble)
878 return getTarget().useObjCFP2RetForComplexLongDouble();
879 }
880 }
881
882 return false;
883 }
884
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
886 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
887 return GetFunctionType(FI);
888 }
889
890 llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
892
893 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
894 assert(Inserted && "Recursively being processed?");
895
896 SmallVector<llvm::Type*, 8> argTypes;
897 llvm::Type *resultType = 0;
898
899 const ABIArgInfo &retAI = FI.getReturnInfo();
900 switch (retAI.getKind()) {
901 case ABIArgInfo::Expand:
902 llvm_unreachable("Invalid ABI kind for return argument");
903
904 case ABIArgInfo::Extend:
905 case ABIArgInfo::Direct:
906 resultType = retAI.getCoerceToType();
907 break;
908
909 case ABIArgInfo::Indirect: {
910 assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
911 resultType = llvm::Type::getVoidTy(getLLVMContext());
912
913 QualType ret = FI.getReturnType();
914 llvm::Type *ty = ConvertType(ret);
915 unsigned addressSpace = Context.getTargetAddressSpace(ret);
916 argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
917 break;
918 }
919
920 case ABIArgInfo::Ignore:
921 resultType = llvm::Type::getVoidTy(getLLVMContext());
922 break;
923 }
924
925 // Add in all of the required arguments.
926 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
927 if (FI.isVariadic()) {
928 ie = it + FI.getRequiredArgs().getNumRequiredArgs();
929 } else {
930 ie = FI.arg_end();
931 }
932 for (; it != ie; ++it) {
933 const ABIArgInfo &argAI = it->info;
934
935 // Insert a padding type to ensure proper alignment.
936 if (llvm::Type *PaddingType = argAI.getPaddingType())
937 argTypes.push_back(PaddingType);
938
939 switch (argAI.getKind()) {
940 case ABIArgInfo::Ignore:
941 break;
942
943 case ABIArgInfo::Indirect: {
944 // indirect arguments are always on the stack, which is addr space #0.
945 llvm::Type *LTy = ConvertTypeForMem(it->type);
946 argTypes.push_back(LTy->getPointerTo());
947 break;
948 }
949
950 case ABIArgInfo::Extend:
951 case ABIArgInfo::Direct: {
952 // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
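      // For example, a parameter coerced to { i64, double } contributes two
      // separate parameters (i64, double) to the LLVM function type.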
955 llvm::Type *argType = argAI.getCoerceToType();
956 if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
957 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
958 argTypes.push_back(st->getElementType(i));
959 } else {
960 argTypes.push_back(argType);
961 }
962 break;
963 }
964
965 case ABIArgInfo::Expand:
966 GetExpandedTypes(it->type, argTypes);
967 break;
968 }
969 }
970
971 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
972 assert(Erased && "Not in set?");
973
974 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
975 }
976
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
978 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
979 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
980
981 if (!isFuncTypeConvertible(FPT))
982 return llvm::StructType::get(getLLVMContext());
983
984 const CGFunctionInfo *Info;
985 if (isa<CXXDestructorDecl>(MD))
986 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
987 else
988 Info = &arrangeCXXMethodDeclaration(MD);
989 return GetFunctionType(*Info);
990 }
991
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
993 const Decl *TargetDecl,
994 AttributeListType &PAL,
995 unsigned &CallingConv,
996 bool AttrOnCallSite) {
997 llvm::AttrBuilder FuncAttrs;
998 llvm::AttrBuilder RetAttrs;
999
1000 CallingConv = FI.getEffectiveCallingConvention();
1001
1002 if (FI.isNoReturn())
1003 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1004
1005 // FIXME: handle sseregparm someday...
1006 if (TargetDecl) {
1007 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1008 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1009 if (TargetDecl->hasAttr<NoThrowAttr>())
1010 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1011 if (TargetDecl->hasAttr<NoReturnAttr>())
1012 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1013
1014 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1015 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
1016 if (FPT && FPT->isNothrow(getContext()))
1017 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overriders.
1020 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1021 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1022 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1023 }
1024
1025 // 'const' and 'pure' attribute functions are also nounwind.
1026 if (TargetDecl->hasAttr<ConstAttr>()) {
1027 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1028 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1029 } else if (TargetDecl->hasAttr<PureAttr>()) {
1030 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1031 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1032 }
1033 if (TargetDecl->hasAttr<MallocAttr>())
1034 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1035 }
1036
1037 if (CodeGenOpts.OptimizeSize)
1038 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1039 if (CodeGenOpts.OptimizeSize == 2)
1040 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1041 if (CodeGenOpts.DisableRedZone)
1042 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1043 if (CodeGenOpts.NoImplicitFloat)
1044 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1045
1046 if (AttrOnCallSite) {
1047 // Attributes that should go on the call site only.
1048 if (!CodeGenOpts.SimplifyLibCalls)
1049 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1050 } else {
1051 // Attributes that should go on the function, but not the call site.
1052 if (!CodeGenOpts.DisableFPElim) {
1053 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1054 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
1055 } else if (CodeGenOpts.OmitLeafFramePointer) {
1056 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1057 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
1058 } else {
1059 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1060 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
1061 }
1062
1063 FuncAttrs.addAttribute("less-precise-fpmad",
1064 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1065 FuncAttrs.addAttribute("no-infs-fp-math",
1066 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1067 FuncAttrs.addAttribute("no-nans-fp-math",
1068 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1069 FuncAttrs.addAttribute("unsafe-fp-math",
1070 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1071 FuncAttrs.addAttribute("use-soft-float",
1072 llvm::toStringRef(CodeGenOpts.SoftFloat));
1073 FuncAttrs.addAttribute("stack-protector-buffer-size",
1074 llvm::utostr(CodeGenOpts.SSPBufferSize));
1075
    // Non-leaf frame-pointer elimination is suppressed exactly when
    // frame-pointer elimination is disabled, regardless of
    // OmitLeafFramePointer.
    bool NoFramePointerElimNonLeaf = CodeGenOpts.DisableFPElim;
1084
1085 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf",
1086 llvm::toStringRef(NoFramePointerElimNonLeaf));
1087
1088 if (!CodeGenOpts.StackRealignment)
1089 FuncAttrs.addAttribute("no-realign-stack");
1090 }
1091
1092 QualType RetTy = FI.getReturnType();
1093 unsigned Index = 1;
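  // LLVM attribute indices: AttributeSet::ReturnIndex names the return value,
  // AttributeSet::FunctionIndex names the function itself, and parameter
  // attributes are numbered starting at 1, which is what Index tracks here.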
1094 const ABIArgInfo &RetAI = FI.getReturnInfo();
1095 switch (RetAI.getKind()) {
1096 case ABIArgInfo::Extend:
1097 if (RetTy->hasSignedIntegerRepresentation())
1098 RetAttrs.addAttribute(llvm::Attribute::SExt);
1099 else if (RetTy->hasUnsignedIntegerRepresentation())
1100 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1101 // FALL THROUGH
1102 case ABIArgInfo::Direct:
1103 if (RetAI.getInReg())
1104 RetAttrs.addAttribute(llvm::Attribute::InReg);
1105 break;
1106 case ABIArgInfo::Ignore:
1107 break;
1108
1109 case ABIArgInfo::Indirect: {
1110 llvm::AttrBuilder SRETAttrs;
1111 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1112 if (RetAI.getInReg())
1113 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1114 PAL.push_back(llvm::
1115 AttributeSet::get(getLLVMContext(), Index, SRETAttrs));
1116
1117 ++Index;
1118 // sret disables readnone and readonly
1119 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1120 .removeAttribute(llvm::Attribute::ReadNone);
1121 break;
1122 }
1123
1124 case ABIArgInfo::Expand:
1125 llvm_unreachable("Invalid ABI kind for return argument");
1126 }
1127
1128 if (RetAttrs.hasAttributes())
1129 PAL.push_back(llvm::
1130 AttributeSet::get(getLLVMContext(),
1131 llvm::AttributeSet::ReturnIndex,
1132 RetAttrs));
1133
1134 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1135 ie = FI.arg_end(); it != ie; ++it) {
1136 QualType ParamType = it->type;
1137 const ABIArgInfo &AI = it->info;
1138 llvm::AttrBuilder Attrs;
1139
1140 if (AI.getPaddingType()) {
1141 if (AI.getPaddingInReg())
1142 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
1143 llvm::Attribute::InReg));
1144 // Increment Index if there is padding.
1145 ++Index;
1146 }
1147
1148 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1149 // have the corresponding parameter variable. It doesn't make
1150 // sense to do it here because parameters are so messed up.
1151 switch (AI.getKind()) {
1152 case ABIArgInfo::Extend:
1153 if (ParamType->isSignedIntegerOrEnumerationType())
1154 Attrs.addAttribute(llvm::Attribute::SExt);
1155 else if (ParamType->isUnsignedIntegerOrEnumerationType())
1156 Attrs.addAttribute(llvm::Attribute::ZExt);
1157 // FALL THROUGH
1158 case ABIArgInfo::Direct:
1159 if (AI.getInReg())
1160 Attrs.addAttribute(llvm::Attribute::InReg);
1161
1162 // FIXME: handle sseregparm someday...
1163
1164 if (llvm::StructType *STy =
1165 dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1166 unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
1167 if (Attrs.hasAttributes())
1168 for (unsigned I = 0; I < Extra; ++I)
1169 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
1170 Attrs));
1171 Index += Extra;
1172 }
1173 break;
1174
1175 case ABIArgInfo::Indirect:
1176 if (AI.getInReg())
1177 Attrs.addAttribute(llvm::Attribute::InReg);
1178
1179 if (AI.getIndirectByVal())
1180 Attrs.addAttribute(llvm::Attribute::ByVal);
1181
1182 Attrs.addAlignmentAttr(AI.getIndirectAlign());
1183
1184 // byval disables readnone and readonly.
1185 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1186 .removeAttribute(llvm::Attribute::ReadNone);
1187 break;
1188
1189 case ABIArgInfo::Ignore:
1190 // Skip increment, no matching LLVM parameter.
1191 continue;
1192
1193 case ABIArgInfo::Expand: {
1194 SmallVector<llvm::Type*, 8> types;
1195 // FIXME: This is rather inefficient. Do we ever actually need to do
1196 // anything here? The result should be just reconstructed on the other
1197 // side, so extension should be a non-issue.
1198 getTypes().GetExpandedTypes(ParamType, types);
1199 Index += types.size();
1200 continue;
1201 }
1202 }
1203
1204 if (Attrs.hasAttributes())
1205 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1206 ++Index;
1207 }
1208 if (FuncAttrs.hasAttributes())
1209 PAL.push_back(llvm::
1210 AttributeSet::get(getLLVMContext(),
1211 llvm::AttributeSet::FunctionIndex,
1212 FuncAttrs));
1213 }
1214
1215 /// An argument came in as a promoted argument; demote it back to its
1216 /// declared type.
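///
/// For example, a K&R-promoted 'short' parameter arrives as an 'int' and is
/// truncated back to 'short' here; a promoted 'float' is narrowed back from
/// 'double' with an FP cast.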
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1218 const VarDecl *var,
1219 llvm::Value *value) {
1220 llvm::Type *varType = CGF.ConvertType(var->getType());
1221
1222 // This can happen with promotions that actually don't change the
1223 // underlying type, like the enum promotions.
1224 if (value->getType() == varType) return value;
1225
1226 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1227 && "unexpected promotion type");
1228
1229 if (isa<llvm::IntegerType>(varType))
1230 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1231
1232 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1233 }
1234
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1236 llvm::Function *Fn,
1237 const FunctionArgList &Args) {
1238 // If this is an implicit-return-zero function, go ahead and
1239 // initialize the return value. TODO: it might be nice to have
1240 // a more general mechanism for this that didn't require synthesized
1241 // return statements.
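  // (The usual case is 'main', whose body may fall off the end without a
  // return statement but must still yield 0.)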
1242 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1243 if (FD->hasImplicitReturnZero()) {
1244 QualType RetTy = FD->getResultType().getUnqualifiedType();
1245 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1246 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1247 Builder.CreateStore(Zero, ReturnValue);
1248 }
1249 }
1250
1251 // FIXME: We no longer need the types from FunctionArgList; lift up and
1252 // simplify.
1253
1254 // Emit allocs for param decls. Give the LLVM Argument nodes names.
1255 llvm::Function::arg_iterator AI = Fn->arg_begin();
1256
1257 // Name the struct return argument.
1258 if (CGM.ReturnTypeUsesSRet(FI)) {
1259 AI->setName("agg.result");
1260 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1261 AI->getArgNo() + 1,
1262 llvm::Attribute::NoAlias));
1263 ++AI;
1264 }
1265
1266 assert(FI.arg_size() == Args.size() &&
1267 "Mismatch between function signature & arguments.");
1268 unsigned ArgNo = 1;
1269 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1270 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1271 i != e; ++i, ++info_it, ++ArgNo) {
1272 const VarDecl *Arg = *i;
1273 QualType Ty = info_it->type;
1274 const ABIArgInfo &ArgI = info_it->info;
1275
1276 bool isPromoted =
1277 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1278
1279 // Skip the dummy padding argument.
1280 if (ArgI.getPaddingType())
1281 ++AI;
1282
1283 switch (ArgI.getKind()) {
1284 case ABIArgInfo::Indirect: {
1285 llvm::Value *V = AI;
1286
1287 if (!hasScalarEvaluationKind(Ty)) {
1288 // Aggregates and complex variables are accessed by reference. All we
1289 // need to do is realign the value, if requested
1290 if (ArgI.getIndirectRealign()) {
1291 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1292
1293 // Copy from the incoming argument pointer to the temporary with the
1294 // appropriate alignment.
1295 //
1296 // FIXME: We should have a common utility for generating an aggregate
1297 // copy.
1298 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1299 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1300 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1301 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1302 Builder.CreateMemCpy(Dst,
1303 Src,
1304 llvm::ConstantInt::get(IntPtrTy,
1305 Size.getQuantity()),
1306 ArgI.getIndirectAlign(),
1307 false);
1308 V = AlignedTemp;
1309 }
1310 } else {
1311 // Load scalar value from indirect argument.
1312 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1313 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1314
1315 if (isPromoted)
1316 V = emitArgumentDemotion(*this, Arg, V);
1317 }
1318 EmitParmDecl(*Arg, V, ArgNo);
1319 break;
1320 }
1321
1322 case ABIArgInfo::Extend:
1323 case ABIArgInfo::Direct: {
1324
1325 // If we have the trivial case, handle it with no muss and fuss.
1326 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1327 ArgI.getCoerceToType() == ConvertType(Ty) &&
1328 ArgI.getDirectOffset() == 0) {
1329 assert(AI != Fn->arg_end() && "Argument mismatch!");
1330 llvm::Value *V = AI;
1331
1332 if (Arg->getType().isRestrictQualified())
1333 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1334 AI->getArgNo() + 1,
1335 llvm::Attribute::NoAlias));
1336
1337 // Ensure the argument is the correct type.
1338 if (V->getType() != ArgI.getCoerceToType())
1339 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1340
1341 if (isPromoted)
1342 V = emitArgumentDemotion(*this, Arg, V);
1343
1344 // Because of merging of function types from multiple decls it is
1345 // possible for the type of an argument to not match the corresponding
1346 // type in the function type. Since we are codegening the callee
1347 // in here, add a cast to the argument type.
1348 llvm::Type *LTy = ConvertType(Arg->getType());
1349 if (V->getType() != LTy)
1350 V = Builder.CreateBitCast(V, LTy);
1351
1352 EmitParmDecl(*Arg, V, ArgNo);
1353 break;
1354 }
1355
1356 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1357
      // The alignment we need to use is the max of the requested alignment for
      // the argument and the alignment required by our access code below.
1360 unsigned AlignmentToUse =
1361 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1362 AlignmentToUse = std::max(AlignmentToUse,
1363 (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1364
1365 Alloca->setAlignment(AlignmentToUse);
1366 llvm::Value *V = Alloca;
1367 llvm::Value *Ptr = V; // Pointer to store into.
1368
1369 // If the value is offset in memory, apply the offset now.
1370 if (unsigned Offs = ArgI.getDirectOffset()) {
1371 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1372 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1373 Ptr = Builder.CreateBitCast(Ptr,
1374 llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1375 }
1376
1377 // If the coerce-to type is a first class aggregate, we flatten it and
1378 // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
1380 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1381 if (STy && STy->getNumElements() > 1) {
1382 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1383 llvm::Type *DstTy =
1384 cast<llvm::PointerType>(Ptr->getType())->getElementType();
1385 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1386
1387 if (SrcSize <= DstSize) {
1388 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1389
1390 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1391 assert(AI != Fn->arg_end() && "Argument mismatch!");
1392 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1393 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1394 Builder.CreateStore(AI++, EltPtr);
1395 }
1396 } else {
1397 llvm::AllocaInst *TempAlloca =
1398 CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1399 TempAlloca->setAlignment(AlignmentToUse);
1400 llvm::Value *TempV = TempAlloca;
1401
1402 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1403 assert(AI != Fn->arg_end() && "Argument mismatch!");
1404 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1405 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1406 Builder.CreateStore(AI++, EltPtr);
1407 }
1408
1409 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1410 }
1411 } else {
1412 // Simple case, just do a coerced store of the argument into the alloca.
1413 assert(AI != Fn->arg_end() && "Argument mismatch!");
1414 AI->setName(Arg->getName() + ".coerce");
1415 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1416 }
1417
1418
1419 // Match to what EmitParmDecl is expecting for this type.
1420 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1421 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1422 if (isPromoted)
1423 V = emitArgumentDemotion(*this, Arg, V);
1424 }
1425 EmitParmDecl(*Arg, V, ArgNo);
1426 continue; // Skip ++AI increment, already done.
1427 }
1428
1429 case ABIArgInfo::Expand: {
1430 // If this structure was expanded into multiple arguments then
1431 // we need to create a temporary and reconstruct it from the
1432 // arguments.
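    // Illustrative example (assumption, not from the original source): for an
    // expanded parameter "s" of type struct { int a; float b; }, the pieces
    // arrive as separate IR arguments which the loop below names "s.0" and
    // "s.1" while ExpandTypeFromArgs rebuilds the struct in the temporary.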
1433 llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1434 CharUnits Align = getContext().getDeclAlign(Arg);
1435 Alloca->setAlignment(Align.getQuantity());
1436 LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1437 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1438 EmitParmDecl(*Arg, Alloca, ArgNo);
1439
1440 // Name the arguments used in expansion and increment AI.
1441 unsigned Index = 0;
1442 for (; AI != End; ++AI, ++Index)
1443 AI->setName(Arg->getName() + "." + Twine(Index));
1444 continue;
1445 }
1446
1447 case ABIArgInfo::Ignore:
1448 // Initialize the local variable appropriately.
1449 if (!hasScalarEvaluationKind(Ty))
1450 EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1451 else
1452 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1453 ArgNo);
1454
1455 // Skip increment, no matching LLVM parameter.
1456 continue;
1457 }
1458
1459 ++AI;
1460 }
1461 assert(AI == Fn->arg_end() && "Argument mismatch!");
1462 }
1463
1464 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1465 while (insn->use_empty()) {
1466 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1467 if (!bitcast) return;
1468
1469 // This is "safe" because we would have used a ConstantExpr otherwise.
1470 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1471 bitcast->eraseFromParent();
1472 }
1473 }
1474
1475 /// Try to emit a fused autorelease of a return result.
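///
/// A hedged sketch of the peephole (not normative): when the block ends with
///   %x = call i8* @objc_retain(i8* %v)
/// feeding the return value, the retain and the pending autorelease are fused
/// into a single objc_retainAutoreleaseReturnValue call; when it ends with
/// objc_retainAutoreleasedReturnValue, both the retain and the autorelease can
/// be elided entirely.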
1476 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1477 llvm::Value *result) {
1478   // We must immediately follow the cast.
1479 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1480 if (BB->empty()) return 0;
1481 if (&BB->back() != result) return 0;
1482
1483 llvm::Type *resultType = result->getType();
1484
1485 // result is in a BasicBlock and is therefore an Instruction.
1486 llvm::Instruction *generator = cast<llvm::Instruction>(result);
1487
1488 SmallVector<llvm::Instruction*,4> insnsToKill;
1489
1490 // Look for:
1491 // %generator = bitcast %type1* %generator2 to %type2*
1492 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1493 // We would have emitted this as a constant if the operand weren't
1494 // an Instruction.
1495 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1496
1497 // Require the generator to be immediately followed by the cast.
1498 if (generator->getNextNode() != bitcast)
1499 return 0;
1500
1501 insnsToKill.push_back(bitcast);
1502 }
1503
1504 // Look for:
1505 // %generator = call i8* @objc_retain(i8* %originalResult)
1506 // or
1507 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1508 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1509 if (!call) return 0;
1510
1511 bool doRetainAutorelease;
1512
1513 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1514 doRetainAutorelease = true;
1515 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1516 .objc_retainAutoreleasedReturnValue) {
1517 doRetainAutorelease = false;
1518
1519 // If we emitted an assembly marker for this call (and the
1520 // ARCEntrypoints field should have been set if so), go looking
1521 // for that call. If we can't find it, we can't do this
1522 // optimization. But it should always be the immediately previous
1523 // instruction, unless we needed bitcasts around the call.
1524 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1525 llvm::Instruction *prev = call->getPrevNode();
1526 assert(prev);
1527 if (isa<llvm::BitCastInst>(prev)) {
1528 prev = prev->getPrevNode();
1529 assert(prev);
1530 }
1531 assert(isa<llvm::CallInst>(prev));
1532 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1533 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1534 insnsToKill.push_back(prev);
1535 }
1536 } else {
1537 return 0;
1538 }
1539
1540 result = call->getArgOperand(0);
1541 insnsToKill.push_back(call);
1542
1543 // Keep killing bitcasts, for sanity. Note that we no longer care
1544 // about precise ordering as long as there's exactly one use.
1545 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1546 if (!bitcast->hasOneUse()) break;
1547 insnsToKill.push_back(bitcast);
1548 result = bitcast->getOperand(0);
1549 }
1550
1551 // Delete all the unnecessary instructions, from latest to earliest.
1552 for (SmallVectorImpl<llvm::Instruction*>::iterator
1553 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1554 (*i)->eraseFromParent();
1555
1556 // Do the fused retain/autorelease if we were asked to.
1557 if (doRetainAutorelease)
1558 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1559
1560 // Cast back to the result type.
1561 return CGF.Builder.CreateBitCast(result, resultType);
1562 }
1563
1564 /// If this is a +1 of the value of an immutable 'self', remove it.
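///
/// Illustrative case (assumption): under ARC, "return self;" in a method whose
/// 'self' is never reassigned emits a retain of the loaded 'self' as part of
/// the return sequence; this peephole deletes that retain (and any dead
/// bitcasts) so the value can be returned without the extra +1/autorelease,
/// which matters most when returning self from -dealloc.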
1565 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1566 llvm::Value *result) {
1567 // This is only applicable to a method with an immutable 'self'.
1568 const ObjCMethodDecl *method =
1569 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1570 if (!method) return 0;
1571 const VarDecl *self = method->getSelfDecl();
1572 if (!self->getType().isConstQualified()) return 0;
1573
1574 // Look for a retain call.
1575 llvm::CallInst *retainCall =
1576 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1577 if (!retainCall ||
1578 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1579 return 0;
1580
1581 // Look for an ordinary load of 'self'.
1582 llvm::Value *retainedValue = retainCall->getArgOperand(0);
1583 llvm::LoadInst *load =
1584 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1585 if (!load || load->isAtomic() || load->isVolatile() ||
1586 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1587 return 0;
1588
1589 // Okay! Burn it all down. This relies for correctness on the
1590 // assumption that the retain is emitted as part of the return and
1591 // that thereafter everything is used "linearly".
1592 llvm::Type *resultType = result->getType();
1593 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1594 assert(retainCall->use_empty());
1595 retainCall->eraseFromParent();
1596 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1597
1598 return CGF.Builder.CreateBitCast(load, resultType);
1599 }
1600
1601 /// Emit an ARC autorelease of the result of a function.
1602 ///
1603 /// \return the value to actually return from the function
1604 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1605 llvm::Value *result) {
1606 // If we're returning 'self', kill the initial retain. This is a
1607 // heuristic attempt to "encourage correctness" in the really unfortunate
1608 // case where we have a return of self during a dealloc and we desperately
1609 // need to avoid the possible autorelease.
1610 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1611 return self;
1612
1613 // At -O0, try to emit a fused retain/autorelease.
1614 if (CGF.shouldUseFusedARCCalls())
1615 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1616 return fused;
1617
1618 return CGF.EmitARCAutoreleaseReturnValue(result);
1619 }
1620
1621 /// Heuristically search for a dominating store to the return-value slot.
1622 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1623 // If there are multiple uses of the return-value slot, just check
1624 // for something immediately preceding the IP. Sometimes this can
1625 // happen with how we generate implicit-returns; it can also happen
1626 // with noreturn cleanups.
1627 if (!CGF.ReturnValue->hasOneUse()) {
1628 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1629 if (IP->empty()) return 0;
1630 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1631 if (!store) return 0;
1632 if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1633 assert(!store->isAtomic() && !store->isVolatile()); // see below
1634 return store;
1635 }
1636
1637 llvm::StoreInst *store =
1638 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1639 if (!store) return 0;
1640
1641 // These aren't actually possible for non-coerced returns, and we
1642 // only care about non-coerced returns on this code path.
1643 assert(!store->isAtomic() && !store->isVolatile());
1644
1645   // Now do a quick-and-dirty dominance check: just walk up the
1646 // single-predecessors chain from the current insertion point.
1647 llvm::BasicBlock *StoreBB = store->getParent();
1648 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1649 while (IP != StoreBB) {
1650 if (!(IP = IP->getSinglePredecessor()))
1651 return 0;
1652 }
1653
1654 // Okay, the store's basic block dominates the insertion point; we
1655 // can do our thing.
1656 return store;
1657 }
1658
1659 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1660 bool EmitRetDbgLoc) {
1661 // Functions with no result always return void.
1662 if (ReturnValue == 0) {
1663 Builder.CreateRetVoid();
1664 return;
1665 }
1666
1667 llvm::DebugLoc RetDbgLoc;
1668 llvm::Value *RV = 0;
1669 QualType RetTy = FI.getReturnType();
1670 const ABIArgInfo &RetAI = FI.getReturnInfo();
1671
1672 switch (RetAI.getKind()) {
1673 case ABIArgInfo::Indirect: {
1674 switch (getEvaluationKind(RetTy)) {
1675 case TEK_Complex: {
1676 ComplexPairTy RT =
1677 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
1678 EmitStoreOfComplex(RT,
1679 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1680 /*isInit*/ true);
1681 break;
1682 }
1683 case TEK_Aggregate:
1684       // Do nothing; aggregates get evaluated directly into the destination.
1685 break;
1686 case TEK_Scalar:
1687 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1688 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1689 /*isInit*/ true);
1690 break;
1691 }
1692 break;
1693 }
1694
1695 case ABIArgInfo::Extend:
1696 case ABIArgInfo::Direct:
1697 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1698 RetAI.getDirectOffset() == 0) {
1699       // The internal return value temp will always have pointer-to-return-type
1700       // type; just do a load.
1701
1702 // If there is a dominating store to ReturnValue, we can elide
1703 // the load, zap the store, and usually zap the alloca.
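      // Illustrative example (not from the original source): for
      //   int f() { return 42; }
      // the store of 42 into the return-value alloca dominates this point, so
      // the stored value is forwarded straight into "ret i32 42" and the store
      // (and usually the alloca) is deleted.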
1704 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1705 // Reuse the debug location from the store unless there is
1706 // cleanup code to be emitted between the store and return
1707 // instruction.
1708 if (EmitRetDbgLoc && !AutoreleaseResult)
1709 RetDbgLoc = SI->getDebugLoc();
1710 // Get the stored value and nuke the now-dead store.
1711 RV = SI->getValueOperand();
1712 SI->eraseFromParent();
1713
1714 // If that was the only use of the return value, nuke it as well now.
1715 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1716 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1717 ReturnValue = 0;
1718 }
1719
1720 // Otherwise, we have to do a simple load.
1721 } else {
1722 RV = Builder.CreateLoad(ReturnValue);
1723 }
1724 } else {
1725 llvm::Value *V = ReturnValue;
1726 // If the value is offset in memory, apply the offset now.
1727 if (unsigned Offs = RetAI.getDirectOffset()) {
1728 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1729 V = Builder.CreateConstGEP1_32(V, Offs);
1730 V = Builder.CreateBitCast(V,
1731 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1732 }
1733
1734 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1735 }
1736
1737 // In ARC, end functions that return a retainable type with a call
1738 // to objc_autoreleaseReturnValue.
1739 if (AutoreleaseResult) {
1740 assert(getLangOpts().ObjCAutoRefCount &&
1741 !FI.isReturnsRetained() &&
1742 RetTy->isObjCRetainableType());
1743 RV = emitAutoreleaseOfResult(*this, RV);
1744 }
1745
1746 break;
1747
1748 case ABIArgInfo::Ignore:
1749 break;
1750
1751 case ABIArgInfo::Expand:
1752 llvm_unreachable("Invalid ABI kind for return argument");
1753 }
1754
1755 llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1756 if (!RetDbgLoc.isUnknown())
1757 Ret->setDebugLoc(RetDbgLoc);
1758 }
1759
1760 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1761 const VarDecl *param) {
1762 // StartFunction converted the ABI-lowered parameter(s) into a
1763 // local alloca. We need to turn that into an r-value suitable
1764 // for EmitCall.
1765 llvm::Value *local = GetAddrOfLocalVar(param);
1766
1767 QualType type = param->getType();
1768
1769 // For the most part, we just need to load the alloca, except:
1770 // 1) aggregate r-values are actually pointers to temporaries, and
1771 // 2) references to non-scalars are pointers directly to the aggregate.
1772 // I don't know why references to scalars are different here.
1773 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1774 if (!hasScalarEvaluationKind(ref->getPointeeType()))
1775 return args.add(RValue::getAggregate(local), type);
1776
1777 // Locals which are references to scalars are represented
1778 // with allocas holding the pointer.
1779 return args.add(RValue::get(Builder.CreateLoad(local)), type);
1780 }
1781
1782 args.add(convertTempToRValue(local, type), type);
1783 }
1784
1785 static bool isProvablyNull(llvm::Value *addr) {
1786 return isa<llvm::ConstantPointerNull>(addr);
1787 }
1788
1789 static bool isProvablyNonNull(llvm::Value *addr) {
1790 return isa<llvm::AllocaInst>(addr);
1791 }
1792
1793 /// Emit the actual writing-back of a writeback.
1794 static void emitWriteback(CodeGenFunction &CGF,
1795 const CallArgList::Writeback &writeback) {
1796 const LValue &srcLV = writeback.Source;
1797 llvm::Value *srcAddr = srcLV.getAddress();
1798 assert(!isProvablyNull(srcAddr) &&
1799 "shouldn't have writeback for provably null argument");
1800
1801 llvm::BasicBlock *contBB = 0;
1802
1803 // If the argument wasn't provably non-null, we need to null check
1804 // before doing the store.
1805 bool provablyNonNull = isProvablyNonNull(srcAddr);
1806 if (!provablyNonNull) {
1807 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1808 contBB = CGF.createBasicBlock("icr.done");
1809
1810 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1811 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1812 CGF.EmitBlock(writebackBB);
1813 }
1814
1815 // Load the value to writeback.
1816 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1817
1818 // Cast it back, in case we're writing an id to a Foo* or something.
1819 value = CGF.Builder.CreateBitCast(value,
1820 cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1821 "icr.writeback-cast");
1822
1823 // Perform the writeback.
1824
1825 // If we have a "to use" value, it's something we need to emit a use
1826 // of. This has to be carefully threaded in: if it's done after the
1827 // release it's potentially undefined behavior (and the optimizer
1828 // will ignore it), and if it happens before the retain then the
1829 // optimizer could move the release there.
1830 if (writeback.ToUse) {
1831 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
1832
1833 // Retain the new value. No need to block-copy here: the block's
1834 // being passed up the stack.
1835 value = CGF.EmitARCRetainNonBlock(value);
1836
1837 // Emit the intrinsic use here.
1838 CGF.EmitARCIntrinsicUse(writeback.ToUse);
1839
1840 // Load the old value (primitively).
1841 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);
1842
1843 // Put the new value in place (primitively).
1844 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
1845
1846 // Release the old value.
1847 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
1848
1849 // Otherwise, we can just do a normal lvalue store.
1850 } else {
1851 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
1852 }
1853
1854 // Jump to the continuation block.
1855 if (!provablyNonNull)
1856 CGF.EmitBlock(contBB);
1857 }
1858
1859 static void emitWritebacks(CodeGenFunction &CGF,
1860 const CallArgList &args) {
1861 for (CallArgList::writeback_iterator
1862 i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1863 emitWriteback(CGF, *i);
1864 }
1865
1866 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
1867 const CallArgList &CallArgs) {
1868 assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
1869 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
1870 CallArgs.getCleanupsToDeactivate();
1871 // Iterate in reverse to increase the likelihood of popping the cleanup.
1872 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
1873 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
1874 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
1875 I->IsActiveIP->eraseFromParent();
1876 }
1877 }
1878
1879 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
1880 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
1881 if (uop->getOpcode() == UO_AddrOf)
1882 return uop->getSubExpr();
1883 return 0;
1884 }
1885
1886 /// Emit an argument that's being passed call-by-writeback. That is,
1887 /// we are passing the address of a temporary that is copied back into the
/// original l-value after the call.
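///
/// Illustrative case (assumption): passing "&err", where 'err' is a __strong
/// NSError* local, to a parameter of type NSError* __autoreleasing * goes
/// through an ObjCIndirectCopyRestoreExpr: we pass the address of a fresh
/// temporary and copy its value back into 'err' after the call.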
1888 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1889 const ObjCIndirectCopyRestoreExpr *CRE) {
1890 LValue srcLV;
1891
1892 // Make an optimistic effort to emit the address as an l-value.
1893   // This can fail if the argument expression is more complicated.
1894 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
1895 srcLV = CGF.EmitLValue(lvExpr);
1896
1897 // Otherwise, just emit it as a scalar.
1898 } else {
1899 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1900
1901 QualType srcAddrType =
1902 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1903 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
1904 }
1905 llvm::Value *srcAddr = srcLV.getAddress();
1906
1907 // The dest and src types don't necessarily match in LLVM terms
1908 // because of the crazy ObjC compatibility rules.
1909
1910 llvm::PointerType *destType =
1911 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1912
1913 // If the address is a constant null, just pass the appropriate null.
1914 if (isProvablyNull(srcAddr)) {
1915 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1916 CRE->getType());
1917 return;
1918 }
1919
1920 // Create the temporary.
1921 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1922 "icr.temp");
1923 // Loading an l-value can introduce a cleanup if the l-value is __weak,
1924 // and that cleanup will be conditional if we can't prove that the l-value
1925 // isn't null, so we need to register a dominating point so that the cleanups
1926 // system will make valid IR.
1927 CodeGenFunction::ConditionalEvaluation condEval(CGF);
1928
1929 // Zero-initialize it if we're not doing a copy-initialization.
1930 bool shouldCopy = CRE->shouldCopy();
1931 if (!shouldCopy) {
1932 llvm::Value *null =
1933 llvm::ConstantPointerNull::get(
1934 cast<llvm::PointerType>(destType->getElementType()));
1935 CGF.Builder.CreateStore(null, temp);
1936 }
1937
1938 llvm::BasicBlock *contBB = 0;
1939 llvm::BasicBlock *originBB = 0;
1940
1941 // If the address is *not* known to be non-null, we need to switch.
1942 llvm::Value *finalArgument;
1943
1944 bool provablyNonNull = isProvablyNonNull(srcAddr);
1945 if (provablyNonNull) {
1946 finalArgument = temp;
1947 } else {
1948 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1949
1950 finalArgument = CGF.Builder.CreateSelect(isNull,
1951 llvm::ConstantPointerNull::get(destType),
1952 temp, "icr.argument");
1953
1954 // If we need to copy, then the load has to be conditional, which
1955 // means we need control flow.
1956 if (shouldCopy) {
1957 originBB = CGF.Builder.GetInsertBlock();
1958 contBB = CGF.createBasicBlock("icr.cont");
1959 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1960 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1961 CGF.EmitBlock(copyBB);
1962 condEval.begin(CGF);
1963 }
1964 }
1965
1966 llvm::Value *valueToUse = 0;
1967
1968 // Perform a copy if necessary.
1969 if (shouldCopy) {
1970 RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1971 assert(srcRV.isScalar());
1972
1973 llvm::Value *src = srcRV.getScalarVal();
1974 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1975 "icr.cast");
1976
1977 // Use an ordinary store, not a store-to-lvalue.
1978 CGF.Builder.CreateStore(src, temp);
1979
1980 // If optimization is enabled, and the value was held in a
1981 // __strong variable, we need to tell the optimizer that this
1982 // value has to stay alive until we're doing the store back.
1983 // This is because the temporary is effectively unretained,
1984 // and so otherwise we can violate the high-level semantics.
1985 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1986 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
1987 valueToUse = src;
1988 }
1989 }
1990
1991 // Finish the control flow if we needed it.
1992 if (shouldCopy && !provablyNonNull) {
1993 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
1994 CGF.EmitBlock(contBB);
1995
1996 // Make a phi for the value to intrinsically use.
1997 if (valueToUse) {
1998 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
1999 "icr.to-use");
2000 phiToUse->addIncoming(valueToUse, copyBB);
2001 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2002 originBB);
2003 valueToUse = phiToUse;
2004 }
2005
2006 condEval.end(CGF);
2007 }
2008
2009 args.addWriteback(srcLV, temp, valueToUse);
2010 args.add(RValue::get(finalArgument), CRE->getType());
2011 }
2012
2013 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2014 QualType type) {
2015 if (const ObjCIndirectCopyRestoreExpr *CRE
2016 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2017 assert(getLangOpts().ObjCAutoRefCount);
2018 assert(getContext().hasSameType(E->getType(), type));
2019 return emitWritebackArg(*this, args, CRE);
2020 }
2021
2022 assert(type->isReferenceType() == E->isGLValue() &&
2023 "reference binding to unmaterialized r-value!");
2024
2025 if (E->isGLValue()) {
2026 assert(E->getObjectKind() == OK_Ordinary);
2027 return args.add(EmitReferenceBindingToExpr(E), type);
2028 }
2029
2030 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2031
2032 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2033 // However, we still have to push an EH-only cleanup in case we unwind before
2034 // we make it to the call.
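  // Illustrative case (assumption): for "void g(Widget w); g(Widget());" under
  // the MS ABI, the callee destroys 'w' on the normal path, but if we unwind
  // while evaluating a later argument the already-constructed temporary still
  // has to be destroyed, hence the EH-only cleanup pushed below.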
2035 if (HasAggregateEvalKind &&
2036 CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
2037 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2038 if (RD && RD->hasNonTrivialDestructor()) {
2039 AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
2040 Slot.setExternallyDestructed();
2041 EmitAggExpr(E, Slot);
2042 RValue RV = Slot.asRValue();
2043 args.add(RV, type);
2044
2045 pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
2046 /*useEHCleanupForArray*/ true);
2047 // This unreachable is a temporary marker which will be removed later.
2048 llvm::Instruction *IsActive = Builder.CreateUnreachable();
2049 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2050 return;
2051 }
2052 }
2053
2054 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2055 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2056 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2057 assert(L.isSimple());
2058 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2059 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2060 } else {
2061 // We can't represent a misaligned lvalue in the CallArgList, so copy
2062 // to an aligned temporary now.
2063 llvm::Value *tmp = CreateMemTemp(type);
2064 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2065 L.getAlignment());
2066 args.add(RValue::getAggregate(tmp), type);
2067 }
2068 return;
2069 }
2070
2071 args.add(EmitAnyExprToTemp(E), type);
2072 }
2073
2074 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2075 // optimizer it can aggressively ignore unwind edges.
2076 void
2077 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
2078 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2079 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
2080 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
2081 CGM.getNoObjCARCExceptionsMetadata());
2082 }
2083
2084 /// Emits a call to the given no-arguments nounwind runtime function.
2085 llvm::CallInst *
2086 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2087 const llvm::Twine &name) {
2088 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2089 }
2090
2091 /// Emits a call to the given nounwind runtime function.
2092 llvm::CallInst *
2093 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2094 ArrayRef<llvm::Value*> args,
2095 const llvm::Twine &name) {
2096 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
2097 call->setDoesNotThrow();
2098 return call;
2099 }
2100
2101 /// Emits a simple call (never an invoke) to the given no-arguments
2102 /// runtime function.
2103 llvm::CallInst *
2104 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2105 const llvm::Twine &name) {
2106 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2107 }
2108
2109 /// Emits a simple call (never an invoke) to the given runtime
2110 /// function.
2111 llvm::CallInst *
2112 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2113 ArrayRef<llvm::Value*> args,
2114 const llvm::Twine &name) {
2115 llvm::CallInst *call = Builder.CreateCall(callee, args, name);
2116 call->setCallingConv(getRuntimeCC());
2117 return call;
2118 }
2119
2120 /// Emits a call or invoke to the given noreturn runtime function.
2121 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
2122 ArrayRef<llvm::Value*> args) {
2123 if (getInvokeDest()) {
2124 llvm::InvokeInst *invoke =
2125 Builder.CreateInvoke(callee,
2126 getUnreachableBlock(),
2127 getInvokeDest(),
2128 args);
2129 invoke->setDoesNotReturn();
2130 invoke->setCallingConv(getRuntimeCC());
2131 } else {
2132 llvm::CallInst *call = Builder.CreateCall(callee, args);
2133 call->setDoesNotReturn();
2134 call->setCallingConv(getRuntimeCC());
2135 Builder.CreateUnreachable();
2136 }
2137 }
2138
2139 /// Emits a call or invoke instruction to the given nullary runtime
2140 /// function.
2141 llvm::CallSite
2142 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2143 const Twine &name) {
2144 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
2145 }
2146
2147 /// Emits a call or invoke instruction to the given runtime function.
2148 llvm::CallSite
2149 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2150 ArrayRef<llvm::Value*> args,
2151 const Twine &name) {
2152 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
2153 callSite.setCallingConv(getRuntimeCC());
2154 return callSite;
2155 }
2156
2157 llvm::CallSite
2158 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2159 const Twine &Name) {
2160 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
2161 }
2162
2163 /// Emits a call or invoke instruction to the given function, depending
2164 /// on the current state of the EH stack.
2165 llvm::CallSite
2166 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2167 ArrayRef<llvm::Value *> Args,
2168 const Twine &Name) {
2169 llvm::BasicBlock *InvokeDest = getInvokeDest();
2170
2171 llvm::Instruction *Inst;
2172 if (!InvokeDest)
2173 Inst = Builder.CreateCall(Callee, Args, Name);
2174 else {
2175 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
2176 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
2177 EmitBlock(ContBB);
2178 }
2179
2180 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2181 // optimizer it can aggressively ignore unwind edges.
2182 if (CGM.getLangOpts().ObjCAutoRefCount)
2183 AddObjCARCExceptionMetadata(Inst);
2184
2185 return Inst;
2186 }
2187
2188 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
2189 llvm::FunctionType *FTy) {
2190 if (ArgNo < FTy->getNumParams())
2191 assert(Elt->getType() == FTy->getParamType(ArgNo));
2192 else
2193 assert(FTy->isVarArg());
2194 ++ArgNo;
2195 }
2196
2197 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
2198 SmallVectorImpl<llvm::Value *> &Args,
2199 llvm::FunctionType *IRFuncTy) {
2200 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2201 unsigned NumElts = AT->getSize().getZExtValue();
2202 QualType EltTy = AT->getElementType();
2203 llvm::Value *Addr = RV.getAggregateAddr();
2204 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2205 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
2206 RValue EltRV = convertTempToRValue(EltAddr, EltTy);
2207 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
2208 }
2209 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2210 RecordDecl *RD = RT->getDecl();
2211 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
2212 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
2213
2214 if (RD->isUnion()) {
2215 const FieldDecl *LargestFD = 0;
2216 CharUnits UnionSize = CharUnits::Zero();
2217
2218 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2219 i != e; ++i) {
2220 const FieldDecl *FD = *i;
2221 assert(!FD->isBitField() &&
2222 "Cannot expand structure with bit-field members.");
2223 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
2224 if (UnionSize < FieldSize) {
2225 UnionSize = FieldSize;
2226 LargestFD = FD;
2227 }
2228 }
2229 if (LargestFD) {
2230 RValue FldRV = EmitRValueForField(LV, LargestFD);
2231 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
2232 }
2233 } else {
2234 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2235 i != e; ++i) {
2236 FieldDecl *FD = *i;
2237
2238 RValue FldRV = EmitRValueForField(LV, FD);
2239 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
2240 }
2241 }
2242 } else if (Ty->isAnyComplexType()) {
2243 ComplexPairTy CV = RV.getComplexVal();
2244 Args.push_back(CV.first);
2245 Args.push_back(CV.second);
2246 } else {
2247 assert(RV.isScalar() &&
2248 "Unexpected non-scalar rvalue during struct expansion.");
2249
2250 // Insert a bitcast as needed.
2251 llvm::Value *V = RV.getScalarVal();
2252 if (Args.size() < IRFuncTy->getNumParams() &&
2253 V->getType() != IRFuncTy->getParamType(Args.size()))
2254 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
2255
2256 Args.push_back(V);
2257 }
2258 }
2259
2260
2261 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2262 llvm::Value *Callee,
2263 ReturnValueSlot ReturnValue,
2264 const CallArgList &CallArgs,
2265 const Decl *TargetDecl,
2266 llvm::Instruction **callOrInvoke) {
2267 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2268 SmallVector<llvm::Value*, 16> Args;
2269
2270 // Handle struct-return functions by passing a pointer to the
2271 // location that we would like to return into.
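  // Illustrative example (assumption): for a call returning a large aggregate
  // such as struct Big { char bytes[64]; }, the ABI classifies the return as
  // Indirect (sret), so the first IR argument pushed below is a pointer to the
  // destination temporary rather than a source-level argument.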
2272 QualType RetTy = CallInfo.getReturnType();
2273 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2274
2275 // IRArgNo - Keep track of the argument number in the callee we're looking at.
2276 unsigned IRArgNo = 0;
2277 llvm::FunctionType *IRFuncTy =
2278 cast<llvm::FunctionType>(
2279 cast<llvm::PointerType>(Callee->getType())->getElementType());
2280
2281 // If the call returns a temporary with struct return, create a temporary
2282 // alloca to hold the result, unless one is given to us.
2283 if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2284 llvm::Value *Value = ReturnValue.getValue();
2285 if (!Value)
2286 Value = CreateMemTemp(RetTy);
2287 Args.push_back(Value);
2288 checkArgMatches(Value, IRArgNo, IRFuncTy);
2289 }
2290
2291 assert(CallInfo.arg_size() == CallArgs.size() &&
2292 "Mismatch between function signature & arguments.");
2293 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2294 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2295 I != E; ++I, ++info_it) {
2296 const ABIArgInfo &ArgInfo = info_it->info;
2297 RValue RV = I->RV;
2298
2299 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
2300
2301 // Insert a padding argument to ensure proper alignment.
2302 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2303 Args.push_back(llvm::UndefValue::get(PaddingType));
2304 ++IRArgNo;
2305 }
2306
2307 switch (ArgInfo.getKind()) {
2308 case ABIArgInfo::Indirect: {
2309 if (RV.isScalar() || RV.isComplex()) {
2310 // Make a temporary alloca to pass the argument.
2311 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2312 if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2313 AI->setAlignment(ArgInfo.getIndirectAlign());
2314 Args.push_back(AI);
2315
2316 LValue argLV =
2317 MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
2318
2319 if (RV.isScalar())
2320 EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
2321 else
2322 EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
2323
2324 // Validate argument match.
2325 checkArgMatches(AI, IRArgNo, IRFuncTy);
2326 } else {
2327 // We want to avoid creating an unnecessary temporary+copy here;
2328 // however, we need one in three cases:
2329 // 1. If the argument is not byval, and we are required to copy the
2330 // source. (This case doesn't occur on any common architecture.)
2331 // 2. If the argument is byval, RV is not sufficiently aligned, and
2332 // we cannot force it to be sufficiently aligned.
2333 // 3. If the argument is byval, but RV is located in an address space
2334 // different than that of the argument (0).
2335 llvm::Value *Addr = RV.getAggregateAddr();
2336 unsigned Align = ArgInfo.getIndirectAlign();
2337 const llvm::DataLayout *TD = &CGM.getDataLayout();
2338 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
2339 const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
2340 IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
2341 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2342 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
2343 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
2344 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
2345 // Create an aligned temporary, and copy to it.
2346 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2347 if (Align > AI->getAlignment())
2348 AI->setAlignment(Align);
2349 Args.push_back(AI);
2350 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2351
2352 // Validate argument match.
2353 checkArgMatches(AI, IRArgNo, IRFuncTy);
2354 } else {
2355 // Skip the extra memcpy call.
2356 Args.push_back(Addr);
2357
2358 // Validate argument match.
2359 checkArgMatches(Addr, IRArgNo, IRFuncTy);
2360 }
2361 }
2362 break;
2363 }
2364
2365 case ABIArgInfo::Ignore:
2366 break;
2367
2368 case ABIArgInfo::Extend:
2369 case ABIArgInfo::Direct: {
2370 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2371 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2372 ArgInfo.getDirectOffset() == 0) {
2373 llvm::Value *V;
2374 if (RV.isScalar())
2375 V = RV.getScalarVal();
2376 else
2377 V = Builder.CreateLoad(RV.getAggregateAddr());
2378
2379 // If the argument doesn't match, perform a bitcast to coerce it. This
2380 // can happen due to trivial type mismatches.
2381 if (IRArgNo < IRFuncTy->getNumParams() &&
2382 V->getType() != IRFuncTy->getParamType(IRArgNo))
2383 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2384 Args.push_back(V);
2385
2386 checkArgMatches(V, IRArgNo, IRFuncTy);
2387 break;
2388 }
2389
2390 // FIXME: Avoid the conversion through memory if possible.
2391 llvm::Value *SrcPtr;
2392 if (RV.isScalar() || RV.isComplex()) {
2393 SrcPtr = CreateMemTemp(I->Ty, "coerce");
2394 LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2395 if (RV.isScalar()) {
2396 EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
2397 } else {
2398 EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
2399 }
2400 } else
2401 SrcPtr = RV.getAggregateAddr();
2402
2403 // If the value is offset in memory, apply the offset now.
2404 if (unsigned Offs = ArgInfo.getDirectOffset()) {
2405 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2406 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2407 SrcPtr = Builder.CreateBitCast(SrcPtr,
2408 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2409
2410 }
2411
2412 // If the coerce-to type is a first class aggregate, we flatten it and
2413 // pass the elements. Either way is semantically identical, but fast-isel
2414     // and the optimizer generally like scalar values better than FCAs.
2415 if (llvm::StructType *STy =
2416 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2417 llvm::Type *SrcTy =
2418 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2419 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2420 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2421
2422 // If the source type is smaller than the destination type of the
2423 // coerce-to logic, copy the source value into a temp alloca the size
2424 // of the destination type to allow loading all of it. The bits past
2425 // the source value are left undef.
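        // Illustrative example (not from the original source): a 12-byte
        // aggregate coerced to a 16-byte { i64, i64 } is first memcpy'd into a
        // 16-byte temporary so that both elements can be loaded; the trailing
        // four bytes of the second element are simply undefined.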
2426 if (SrcSize < DstSize) {
2427 llvm::AllocaInst *TempAlloca
2428 = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2429 Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2430 SrcPtr = TempAlloca;
2431 } else {
2432 SrcPtr = Builder.CreateBitCast(SrcPtr,
2433 llvm::PointerType::getUnqual(STy));
2434 }
2435
2436 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2437 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2438 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2439 // We don't know what we're loading from.
2440 LI->setAlignment(1);
2441 Args.push_back(LI);
2442
2443 // Validate argument match.
2444 checkArgMatches(LI, IRArgNo, IRFuncTy);
2445 }
2446 } else {
2447 // In the simple case, just pass the coerced loaded value.
2448 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2449 *this));
2450
2451 // Validate argument match.
2452 checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2453 }
2454
2455 break;
2456 }
2457
2458 case ABIArgInfo::Expand:
2459 ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2460 IRArgNo = Args.size();
2461 break;
2462 }
2463 }
2464
2465 if (!CallArgs.getCleanupsToDeactivate().empty())
2466 deactivateArgCleanupsBeforeCall(*this, CallArgs);
2467
2468 // If the callee is a bitcast of a function to a varargs pointer to function
2469 // type, check to see if we can remove the bitcast. This handles some cases
2470 // with unprototyped functions.
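  // A hedged illustration (assumption): this typically fires for K&R-style
  // calls where the call site was lowered against a function type
  // reconstructed from the actual arguments while the llvm::Function carries
  // its real prototype; when the two types agree element-for-element, the
  // cast is dropped so the function can be called (and inlined) directly.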
2471 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2472 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2473 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2474 llvm::FunctionType *CurFT =
2475 cast<llvm::FunctionType>(CurPT->getElementType());
2476 llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2477
2478 if (CE->getOpcode() == llvm::Instruction::BitCast &&
2479 ActualFT->getReturnType() == CurFT->getReturnType() &&
2480 ActualFT->getNumParams() == CurFT->getNumParams() &&
2481 ActualFT->getNumParams() == Args.size() &&
2482 (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2483 bool ArgsMatch = true;
2484 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2485 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2486 ArgsMatch = false;
2487 break;
2488 }
2489
2490 // Strip the cast if we can get away with it. This is a nice cleanup,
2491 // but also allows us to inline the function at -O0 if it is marked
2492 // always_inline.
2493 if (ArgsMatch)
2494 Callee = CalleeF;
2495 }
2496 }
2497
2498 unsigned CallingConv;
2499 CodeGen::AttributeListType AttributeList;
2500 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2501 CallingConv, true);
2502 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2503 AttributeList);
2504
2505 llvm::BasicBlock *InvokeDest = 0;
2506 if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2507 llvm::Attribute::NoUnwind))
2508 InvokeDest = getInvokeDest();
2509
2510 llvm::CallSite CS;
2511 if (!InvokeDest) {
2512 CS = Builder.CreateCall(Callee, Args);
2513 } else {
2514 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2515 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2516 EmitBlock(Cont);
2517 }
2518 if (callOrInvoke)
2519 *callOrInvoke = CS.getInstruction();
2520
2521 CS.setAttributes(Attrs);
2522 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2523
2524 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2525 // optimizer it can aggressively ignore unwind edges.
2526 if (CGM.getLangOpts().ObjCAutoRefCount)
2527 AddObjCARCExceptionMetadata(CS.getInstruction());
2528
2529 // If the call doesn't return, finish the basic block and clear the
2530 // insertion point; this allows the rest of IRgen to discard
2531 // unreachable code.
2532 if (CS.doesNotReturn()) {
2533 Builder.CreateUnreachable();
2534 Builder.ClearInsertionPoint();
2535
2536     // FIXME: For now, emit a dummy basic block because expr emitters in
2537     // general are not ready to handle emitting expressions at unreachable
2538 // points.
2539 EnsureInsertPoint();
2540
2541 // Return a reasonable RValue.
2542 return GetUndefRValue(RetTy);
2543 }
2544
2545 llvm::Instruction *CI = CS.getInstruction();
2546 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2547 CI->setName("call");
2548
2549 // Emit any writebacks immediately. Arguably this should happen
2550 // after any return-value munging.
2551 if (CallArgs.hasWritebacks())
2552 emitWritebacks(*this, CallArgs);
2553
2554 switch (RetAI.getKind()) {
2555 case ABIArgInfo::Indirect:
2556 return convertTempToRValue(Args[0], RetTy);
2557
2558 case ABIArgInfo::Ignore:
2559     // Even though the ABI says to ignore the result, make sure to construct
2560     // an appropriate return value for our caller.
2561 return GetUndefRValue(RetTy);
2562
2563 case ABIArgInfo::Extend:
2564 case ABIArgInfo::Direct: {
2565 llvm::Type *RetIRTy = ConvertType(RetTy);
2566 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2567 switch (getEvaluationKind(RetTy)) {
2568 case TEK_Complex: {
2569 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2570 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2571 return RValue::getComplex(std::make_pair(Real, Imag));
2572 }
2573 case TEK_Aggregate: {
2574 llvm::Value *DestPtr = ReturnValue.getValue();
2575 bool DestIsVolatile = ReturnValue.isVolatile();
2576
2577 if (!DestPtr) {
2578 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2579 DestIsVolatile = false;
2580 }
2581 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2582 return RValue::getAggregate(DestPtr);
2583 }
2584 case TEK_Scalar: {
2585 // If the argument doesn't match, perform a bitcast to coerce it. This
2586 // can happen due to trivial type mismatches.
2587 llvm::Value *V = CI;
2588 if (V->getType() != RetIRTy)
2589 V = Builder.CreateBitCast(V, RetIRTy);
2590 return RValue::get(V);
2591 }
2592 }
2593 llvm_unreachable("bad evaluation kind");
2594 }
2595
2596 llvm::Value *DestPtr = ReturnValue.getValue();
2597 bool DestIsVolatile = ReturnValue.isVolatile();
2598
2599 if (!DestPtr) {
2600 DestPtr = CreateMemTemp(RetTy, "coerce");
2601 DestIsVolatile = false;
2602 }
2603
2604 // If the value is offset in memory, apply the offset now.
2605 llvm::Value *StorePtr = DestPtr;
2606 if (unsigned Offs = RetAI.getDirectOffset()) {
2607 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2608 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2609 StorePtr = Builder.CreateBitCast(StorePtr,
2610 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2611 }
2612 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2613
2614 return convertTempToRValue(DestPtr, RetTy);
2615 }
2616
2617 case ABIArgInfo::Expand:
2618 llvm_unreachable("Invalid ABI kind for return argument");
2619 }
2620
2621 llvm_unreachable("Unhandled ABIArgInfo::Kind");
2622 }
2623
2624 /* VarArg handling */
2625
2626 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2627 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2628 }
2629