1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/InlineAsm.h"
30 #include "llvm/MC/SubtargetFeature.h"
31 #include "llvm/Support/CallSite.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 using namespace clang;
34 using namespace CodeGen;
35
36 /***/
37
38 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
39 switch (CC) {
40 default: return llvm::CallingConv::C;
41 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
42 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
43 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
44 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
45 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
46 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
47 // TODO: add support for CC_X86Pascal to llvm
48 }
49 }
50
51 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
52 /// qualification.
53 /// FIXME: address space qualification?
54 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
55 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
56 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
57 }
58
59 /// Returns the canonical formal type of the given C++ method.
60 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
61 return MD->getType()->getCanonicalTypeUnqualified()
62 .getAs<FunctionProtoType>();
63 }
64
65 /// Returns the "extra-canonicalized" return type, which discards
66 /// qualifiers on the return type. Codegen doesn't care about them,
67 /// and it makes ABI code a little easier to be able to assume that
68 /// all parameter and return types are top-level unqualified.
69 static CanQualType GetReturnType(QualType RetTy) {
70 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
71 }
72
73 /// Arrange the argument and result information for a value of the given
74 /// unprototyped freestanding function type.
75 const CGFunctionInfo &
76 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
77 // When translating an unprototyped function type, always use a
78 // variadic type.
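// RequiredArgs(0) means the signature has no fixed parameters; every argument
// a call site supplies is treated as part of the variadic tail.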
79 return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
80 ArrayRef<CanQualType>(),
81 FTNP->getExtInfo(),
82 RequiredArgs(0));
83 }
84
85 /// Arrange the LLVM function layout for a value of the given function
86 /// type, on top of any implicit parameters already stored. Use the
87 /// given ExtInfo instead of the ExtInfo from the function type.
88 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
89 SmallVectorImpl<CanQualType> &prefix,
90 CanQual<FunctionProtoType> FTP,
91 FunctionType::ExtInfo extInfo) {
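// Everything already in 'prefix' (e.g. an implicit 'this') plus the prototyped
// parameters is required; anything beyond that count is variadic.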
92 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
93 // FIXME: Kill copy.
94 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
95 prefix.push_back(FTP->getArgType(i));
96 CanQualType resultType = FTP->getResultType().getUnqualifiedType();
97 return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
98 }
99
100 /// Arrange the argument and result information for a free function (i.e.
101 /// not a C++ or ObjC instance method) of the given type.
102 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
103 SmallVectorImpl<CanQualType> &prefix,
104 CanQual<FunctionProtoType> FTP) {
105 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
106 }
107
108 /// Given the formal ext-info of a C++ instance method, adjust it
109 /// according to the C++ ABI in effect.
110 static void adjustCXXMethodInfo(CodeGenTypes &CGT,
111 FunctionType::ExtInfo &extInfo,
112 bool isVariadic) {
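// CC_Default means the method type carried no explicit calling convention, so
// substitute the target's default convention for C++ instance methods (which
// can differ for variadic methods).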
113 if (extInfo.getCC() == CC_Default) {
114 CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
115 extInfo = extInfo.withCallingConv(CC);
116 }
117 }
118
119 /// Arrange the argument and result information for a C++ method of the
120 /// given type, adjusting the ext-info according to the C++ ABI in effect.
121 static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
122 SmallVectorImpl<CanQualType> &prefix,
123 CanQual<FunctionProtoType> FTP) {
124 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
125 adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
126 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
127 }
128
129 /// Arrange the argument and result information for a value of the
130 /// given freestanding function type.
131 const CGFunctionInfo &
132 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
133 SmallVector<CanQualType, 16> argTypes;
134 return ::arrangeFreeFunctionType(*this, argTypes, FTP);
135 }
136
137 static CallingConv getCallingConventionForDecl(const Decl *D) {
138 // Set the appropriate calling convention for the Function.
139 if (D->hasAttr<StdCallAttr>())
140 return CC_X86StdCall;
141
142 if (D->hasAttr<FastCallAttr>())
143 return CC_X86FastCall;
144
145 if (D->hasAttr<ThisCallAttr>())
146 return CC_X86ThisCall;
147
148 if (D->hasAttr<PascalAttr>())
149 return CC_X86Pascal;
150
151 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
152 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
153
154 if (D->hasAttr<PnaclCallAttr>())
155 return CC_PnaclCall;
156
157 if (D->hasAttr<IntelOclBiccAttr>())
158 return CC_IntelOclBicc;
159
160 return CC_C;
161 }
162
163 /// Arrange the argument and result information for a call to an
164 /// unknown C++ non-static member function of the given abstract type.
165 /// The member function must be an ordinary function, i.e. not a
166 /// constructor or destructor.
167 const CGFunctionInfo &
168 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
169 const FunctionProtoType *FTP) {
170 SmallVector<CanQualType, 16> argTypes;
171
172 // Add the 'this' pointer.
173 argTypes.push_back(GetThisType(Context, RD));
174
175 return ::arrangeCXXMethodType(*this, argTypes,
176 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
177 }
178
179 /// Arrange the argument and result information for a declaration or
180 /// definition of the given C++ non-static member function. The
181 /// member function must be an ordinary function, i.e. not a
182 /// constructor or destructor.
183 const CGFunctionInfo &
184 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
185 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
186 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
187
188 CanQual<FunctionProtoType> prototype = GetFormalType(MD);
189
190 if (MD->isInstance()) {
191 // The abstract case is perfectly fine.
192 return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
193 }
194
195 return arrangeFreeFunctionType(prototype);
196 }
197
198 /// Arrange the argument and result information for a declaration
199 /// or definition to the given constructor variant.
200 const CGFunctionInfo &
201 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
202 CXXCtorType ctorKind) {
203 SmallVector<CanQualType, 16> argTypes;
204 argTypes.push_back(GetThisType(Context, D->getParent()));
205 CanQualType resultType = Context.VoidTy;
206
207 TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
208
209 CanQual<FunctionProtoType> FTP = GetFormalType(D);
210
211 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
212
213 // Add the formal parameters.
214 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
215 argTypes.push_back(FTP->getArgType(i));
216
217 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
218 adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
219 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
220 }
221
222 /// Arrange the argument and result information for a declaration,
223 /// definition, or call to the given destructor variant. It so
224 /// happens that all three cases produce the same information.
225 const CGFunctionInfo &
226 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
227 CXXDtorType dtorKind) {
228 SmallVector<CanQualType, 2> argTypes;
229 argTypes.push_back(GetThisType(Context, D->getParent()));
230 CanQualType resultType = Context.VoidTy;
231
232 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
233
234 CanQual<FunctionProtoType> FTP = GetFormalType(D);
235 assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
236 assert(FTP->isVariadic() == 0 && "dtor with variadic signature");
237
238 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
239 adjustCXXMethodInfo(*this, extInfo, false);
240 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
241 RequiredArgs::All);
242 }
243
244 /// Arrange the argument and result information for the declaration or
245 /// definition of the given function.
246 const CGFunctionInfo &
247 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
248 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
249 if (MD->isInstance())
250 return arrangeCXXMethodDeclaration(MD);
251
252 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
253
254 assert(isa<FunctionType>(FTy));
255
256 // When declaring a function without a prototype, always use a
257 // non-variadic type.
258 if (isa<FunctionNoProtoType>(FTy)) {
259 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
260 return arrangeLLVMFunctionInfo(noProto->getResultType(),
261 ArrayRef<CanQualType>(),
262 noProto->getExtInfo(),
263 RequiredArgs::All);
264 }
265
266 assert(isa<FunctionProtoType>(FTy));
267 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
268 }
269
270 /// Arrange the argument and result information for the declaration or
271 /// definition of an Objective-C method.
272 const CGFunctionInfo &
273 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
274 // It happens that this is the same as a call with no optional
275 // arguments, except also using the formal 'self' type.
276 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
277 }
278
279 /// Arrange the argument and result information for the function type
280 /// through which to perform a send to the given Objective-C method,
281 /// using the given receiver type. The receiver type is not always
282 /// the 'self' type of the method or even an Objective-C pointer type.
283 /// This is *not* the right method for actually performing such a
284 /// message send, due to the possibility of optional arguments.
285 const CGFunctionInfo &
286 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
287 QualType receiverType) {
288 SmallVector<CanQualType, 16> argTys;
289 argTys.push_back(Context.getCanonicalParamType(receiverType));
290 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
291 // FIXME: Kill copy?
292 for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
293 e = MD->param_end(); i != e; ++i) {
294 argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
295 }
296
297 FunctionType::ExtInfo einfo;
298 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
299
300 if (getContext().getLangOpts().ObjCAutoRefCount &&
301 MD->hasAttr<NSReturnsRetainedAttr>())
302 einfo = einfo.withProducesResult(true);
303
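// Only the formal parameters (plus the implicit self and _cmd slots pushed
// above) are required; for a variadic method anything past them is optional.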
304 RequiredArgs required =
305 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
306
307 return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
308 einfo, required);
309 }
310
311 const CGFunctionInfo &
312 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
313 // FIXME: Do we need to handle ObjCMethodDecl?
314 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
315
316 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
317 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
318
319 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
320 return arrangeCXXDestructor(DD, GD.getDtorType());
321
322 return arrangeFunctionDeclaration(FD);
323 }
324
325 /// Arrange a call as unto a free function, except possibly with an
326 /// additional number of formal parameters considered required.
327 static const CGFunctionInfo &
328 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
329 const CallArgList &args,
330 const FunctionType *fnType,
331 unsigned numExtraRequiredArgs) {
332 assert(args.size() >= numExtraRequiredArgs);
333
334 // In most cases, there are no optional arguments.
335 RequiredArgs required = RequiredArgs::All;
336
337 // If we have a variadic prototype, the required arguments are the
338 // extra prefix plus the arguments in the prototype.
339 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
340 if (proto->isVariadic())
341 required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
342
343 // If we don't have a prototype at all, but we're supposed to
344 // explicitly use the variadic convention for unprototyped calls,
345 // treat all of the arguments as required but preserve the nominal
346 // possibility of variadics.
347 } else if (CGT.CGM.getTargetCodeGenInfo()
348 .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
349 required = RequiredArgs(args.size());
350 }
351
352 return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
353 fnType->getExtInfo(), required);
354 }
355
356 /// Figure out the rules for calling a function with the given formal
357 /// type using the given arguments. The arguments are necessary
358 /// because the function might be unprototyped, in which case it's
359 /// target-dependent in crazy ways.
360 const CGFunctionInfo &
361 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
362 const FunctionType *fnType) {
363 return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
364 }
365
366 /// A block function call is essentially a free-function call with an
367 /// extra implicit argument.
368 const CGFunctionInfo &
369 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
370 const FunctionType *fnType) {
371 return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
372 }
373
374 const CGFunctionInfo &
375 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
376 const CallArgList &args,
377 FunctionType::ExtInfo info,
378 RequiredArgs required) {
379 // FIXME: Kill copy.
380 SmallVector<CanQualType, 16> argTypes;
381 for (CallArgList::const_iterator i = args.begin(), e = args.end();
382 i != e; ++i)
383 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
384 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
385 required);
386 }
387
388 /// Arrange a call to a C++ method, passing the given arguments.
389 const CGFunctionInfo &
390 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
391 const FunctionProtoType *FPT,
392 RequiredArgs required) {
393 // FIXME: Kill copy.
394 SmallVector<CanQualType, 16> argTypes;
395 for (CallArgList::const_iterator i = args.begin(), e = args.end();
396 i != e; ++i)
397 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
398
399 FunctionType::ExtInfo info = FPT->getExtInfo();
400 adjustCXXMethodInfo(*this, info, FPT->isVariadic());
401 return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
402 argTypes, info, required);
403 }
404
405 const CGFunctionInfo &
406 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
407 const FunctionArgList &args,
408 const FunctionType::ExtInfo &info,
409 bool isVariadic) {
410 // FIXME: Kill copy.
411 SmallVector<CanQualType, 16> argTypes;
412 for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
413 i != e; ++i)
414 argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
415
416 RequiredArgs required =
417 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
418 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
419 required);
420 }
421
422 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
423 return arrangeLLVMFunctionInfo(getContext().VoidTy, ArrayRef<CanQualType>(),
424 FunctionType::ExtInfo(), RequiredArgs::All);
425 }
426
427 /// Arrange the argument and result information for an abstract value
428 /// of a given function type. This is the method which all of the
429 /// above functions ultimately defer to.
430 const CGFunctionInfo &
431 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
432 ArrayRef<CanQualType> argTypes,
433 FunctionType::ExtInfo info,
434 RequiredArgs required) {
435 #ifndef NDEBUG
436 for (ArrayRef<CanQualType>::const_iterator
437 I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
438 assert(I->isCanonicalAsParam());
439 #endif
440
441 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
442
443 // Look up or create unique function info.
444 llvm::FoldingSetNodeID ID;
445 CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
446
447 void *insertPos = 0;
448 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
449 if (FI)
450 return *FI;
451
452 // Construct the function info. We co-allocate the ArgInfos.
453 FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
454 FunctionInfos.InsertNode(FI, insertPos);
455
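// Guard against re-entry: computing the ABI info below may trigger type
// conversion that ends up back here for the same signature.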
456 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
457 assert(inserted && "Recursively being processed?");
458
459 // Compute ABI information.
460 getABIInfo().computeInfo(*FI);
461
462 // Loop over all of the computed argument and return value info. If any of
463 // them are direct or extend without a specified coerce type, specify the
464 // default now.
465 ABIArgInfo &retInfo = FI->getReturnInfo();
466 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
467 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
468
469 for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
470 I != E; ++I)
471 if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
472 I->info.setCoerceToType(ConvertType(I->type));
473
474 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
475 assert(erased && "Not in set?");
476
477 return *FI;
478 }
479
480 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
481 const FunctionType::ExtInfo &info,
482 CanQualType resultType,
483 ArrayRef<CanQualType> argTypes,
484 RequiredArgs required) {
485 void *buffer = operator new(sizeof(CGFunctionInfo) +
486 sizeof(ArgInfo) * (argTypes.size() + 1));
487 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
488 FI->CallingConvention = llvmCC;
489 FI->EffectiveCallingConvention = llvmCC;
490 FI->ASTCallingConvention = info.getCC();
491 FI->NoReturn = info.getNoReturn();
492 FI->ReturnsRetained = info.getProducesResult();
493 FI->Required = required;
494 FI->HasRegParm = info.getHasRegParm();
495 FI->RegParm = info.getRegParm();
496 FI->NumArgs = argTypes.size();
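// Slot 0 of the co-allocated ArgInfo array holds the return type; the
// argument types follow in slots 1..NumArgs.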
497 FI->getArgsBuffer()[0].type = resultType;
498 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
499 FI->getArgsBuffer()[i + 1].type = argTypes[i];
500 return FI;
501 }
502
503 /***/
504
505 void CodeGenTypes::GetExpandedTypes(QualType type,
506 SmallVectorImpl<llvm::Type*> &expandedTypes) {
507 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
508 uint64_t NumElts = AT->getSize().getZExtValue();
509 for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
510 GetExpandedTypes(AT->getElementType(), expandedTypes);
511 } else if (const RecordType *RT = type->getAs<RecordType>()) {
512 const RecordDecl *RD = RT->getDecl();
513 assert(!RD->hasFlexibleArrayMember() &&
514 "Cannot expand structure with flexible array.");
515 if (RD->isUnion()) {
516 // Unions can be here only in degenerate cases: all the fields are the same
517 // after flattening. Thus we have to use the "largest" field.
518 const FieldDecl *LargestFD = 0;
519 CharUnits UnionSize = CharUnits::Zero();
520
521 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
522 i != e; ++i) {
523 const FieldDecl *FD = *i;
524 assert(!FD->isBitField() &&
525 "Cannot expand structure with bit-field members.");
526 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
527 if (UnionSize < FieldSize) {
528 UnionSize = FieldSize;
529 LargestFD = FD;
530 }
531 }
532 if (LargestFD)
533 GetExpandedTypes(LargestFD->getType(), expandedTypes);
534 } else {
535 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
536 i != e; ++i) {
537 assert(!i->isBitField() &&
538 "Cannot expand structure with bit-field members.");
539 GetExpandedTypes(i->getType(), expandedTypes);
540 }
541 }
542 } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
543 llvm::Type *EltTy = ConvertType(CT->getElementType());
544 expandedTypes.push_back(EltTy);
545 expandedTypes.push_back(EltTy);
546 } else
547 expandedTypes.push_back(ConvertType(type));
548 }
549
550 llvm::Function::arg_iterator
551 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
552 llvm::Function::arg_iterator AI) {
553 assert(LV.isSimple() &&
554 "Unexpected non-simple lvalue during struct expansion.");
555
556 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
557 unsigned NumElts = AT->getSize().getZExtValue();
558 QualType EltTy = AT->getElementType();
559 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
560 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
561 LValue LV = MakeAddrLValue(EltAddr, EltTy);
562 AI = ExpandTypeFromArgs(EltTy, LV, AI);
563 }
564 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
565 RecordDecl *RD = RT->getDecl();
566 if (RD->isUnion()) {
567 // Unions can be here only in degenerate cases: all the fields are the same
568 // after flattening. Thus we have to use the "largest" field.
569 const FieldDecl *LargestFD = 0;
570 CharUnits UnionSize = CharUnits::Zero();
571
572 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
573 i != e; ++i) {
574 const FieldDecl *FD = *i;
575 assert(!FD->isBitField() &&
576 "Cannot expand structure with bit-field members.");
577 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
578 if (UnionSize < FieldSize) {
579 UnionSize = FieldSize;
580 LargestFD = FD;
581 }
582 }
583 if (LargestFD) {
584 // FIXME: What are the right qualifiers here?
585 LValue SubLV = EmitLValueForField(LV, LargestFD);
586 AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
587 }
588 } else {
589 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
590 i != e; ++i) {
591 FieldDecl *FD = *i;
592 QualType FT = FD->getType();
593
594 // FIXME: What are the right qualifiers here?
595 LValue SubLV = EmitLValueForField(LV, FD);
596 AI = ExpandTypeFromArgs(FT, SubLV, AI);
597 }
598 }
599 } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
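// A complex parameter was expanded into two scalar arguments: the real part
// arrives first, followed by the imaginary part.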
600 QualType EltTy = CT->getElementType();
601 llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
602 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
603 llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
604 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
605 } else {
606 EmitStoreThroughLValue(RValue::get(AI), LV);
607 ++AI;
608 }
609
610 return AI;
611 }
612
613 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
614 /// accessing some number of bytes out of, try to gep into the struct to get
615 /// at its inner goodness. Dive as deep as possible without entering an element
616 /// with an in-memory size smaller than DstSize.
617 static llvm::Value *
618 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
619 llvm::StructType *SrcSTy,
620 uint64_t DstSize, CodeGenFunction &CGF) {
621 // We can't dive into a zero-element struct.
622 if (SrcSTy->getNumElements() == 0) return SrcPtr;
623
624 llvm::Type *FirstElt = SrcSTy->getElementType(0);
625
626 // If the first elt is at least as large as what we're looking for, or if the
627 // first element is the same size as the whole struct, we can enter it.
628 uint64_t FirstEltSize =
629 CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
630 if (FirstEltSize < DstSize &&
631 FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
632 return SrcPtr;
633
634 // GEP into the first element.
635 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
636
637 // If the first element is a struct, recurse.
638 llvm::Type *SrcTy =
639 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
640 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
641 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
642
643 return SrcPtr;
644 }
645
646 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
647 /// are either integers or pointers. This does a truncation of the value if it
648 /// is too large or a zero extension if it is too small.
649 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
650 llvm::Type *Ty,
651 CodeGenFunction &CGF) {
652 if (Val->getType() == Ty)
653 return Val;
654
655 if (isa<llvm::PointerType>(Val->getType())) {
656 // If this is Pointer->Pointer avoid conversion to and from int.
657 if (isa<llvm::PointerType>(Ty))
658 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
659
660 // Convert the pointer to an integer so we can play with its width.
661 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
662 }
663
664 llvm::Type *DestIntTy = Ty;
665 if (isa<llvm::PointerType>(DestIntTy))
666 DestIntTy = CGF.IntPtrTy;
667
668 if (Val->getType() != DestIntTy)
669 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
670
671 if (isa<llvm::PointerType>(Ty))
672 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
673 return Val;
674 }
675
676
677
678 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
679 /// a pointer to an object of type \arg Ty.
680 ///
681 /// This safely handles the case when the src type is smaller than the
682 /// destination type; in this situation the values of bits which are not
683 /// present in the src are undefined.
684 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
685 llvm::Type *Ty,
686 CodeGenFunction &CGF) {
687 llvm::Type *SrcTy =
688 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
689
690 // If SrcTy and Ty are the same, just do a load.
691 if (SrcTy == Ty)
692 return CGF.Builder.CreateLoad(SrcPtr);
693
694 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
695
696 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
697 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
698 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
699 }
700
701 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
702
703 // If the source and destination are integer or pointer types, just do an
704 // extension or truncation to the desired type.
705 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
706 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
707 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
708 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
709 }
710
711 // If load is legal, just bitcast the src pointer.
712 if (SrcSize >= DstSize) {
713 // Generally SrcSize is never greater than DstSize, since this means we are
714 // losing bits. However, this can happen in cases where the structure has
715 // additional padding, for example due to a user specified alignment.
716 //
717 // FIXME: Assert that we aren't truncating non-padding bits when we have
718 // access to that information.
719 llvm::Value *Casted =
720 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
721 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
722 // FIXME: Use better alignment / avoid requiring aligned load.
723 Load->setAlignment(1);
724 return Load;
725 }
726
727 // Otherwise do coercion through memory. This is stupid, but
728 // simple.
729 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
730 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
731 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
732 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
733 // FIXME: Use better alignment.
734 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
735 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
736 1, false);
737 return CGF.Builder.CreateLoad(Tmp);
738 }
739
740 // Function to store a first-class aggregate into memory. We prefer to
741 // store the elements rather than the aggregate to be more friendly to
742 // fast-isel.
743 // FIXME: Do we need to recurse here?
744 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
745 llvm::Value *DestPtr, bool DestIsVolatile,
746 bool LowAlignment) {
747 // Prefer scalar stores to first-class aggregate stores.
748 if (llvm::StructType *STy =
749 dyn_cast<llvm::StructType>(Val->getType())) {
750 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
751 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
752 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
753 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
754 DestIsVolatile);
755 if (LowAlignment)
756 SI->setAlignment(1);
757 }
758 } else {
759 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
760 if (LowAlignment)
761 SI->setAlignment(1);
762 }
763 }
764
765 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
766 /// where the source and destination may have different types.
767 ///
768 /// This safely handles the case when the src type is larger than the
769 /// destination type; the upper bits of the src will be lost.
770 static void CreateCoercedStore(llvm::Value *Src,
771 llvm::Value *DstPtr,
772 bool DstIsVolatile,
773 CodeGenFunction &CGF) {
774 llvm::Type *SrcTy = Src->getType();
775 llvm::Type *DstTy =
776 cast<llvm::PointerType>(DstPtr->getType())->getElementType();
777 if (SrcTy == DstTy) {
778 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
779 return;
780 }
781
782 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
783
784 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
785 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
786 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
787 }
788
789 // If the source and destination are integer or pointer types, just do an
790 // extension or truncation to the desired type.
791 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
792 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
793 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
794 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
795 return;
796 }
797
798 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
799
800 // If store is legal, just bitcast the src pointer.
801 if (SrcSize <= DstSize) {
802 llvm::Value *Casted =
803 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
804 // FIXME: Use better alignment / avoid requiring aligned store.
805 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
806 } else {
807 // Otherwise do coercion through memory. This is stupid, but
808 // simple.
809
810 // Generally SrcSize is never greater than DstSize, since this means we are
811 // losing bits. However, this can happen in cases where the structure has
812 // additional padding, for example due to a user specified alignment.
813 //
814 // FIXME: Assert that we aren't truncating non-padding bits when we have
815 // access to that information.
816 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
817 CGF.Builder.CreateStore(Src, Tmp);
818 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
819 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
820 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
821 // FIXME: Use better alignment.
822 CGF.Builder.CreateMemCpy(DstCasted, Casted,
823 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
824 1, false);
825 }
826 }
827
828 /***/
829
830 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
831 return FI.getReturnInfo().isIndirect();
832 }
833
834 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
835 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
836 switch (BT->getKind()) {
837 default:
838 return false;
839 case BuiltinType::Float:
840 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
841 case BuiltinType::Double:
842 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
843 case BuiltinType::LongDouble:
844 return getContext().getTargetInfo().useObjCFPRetForRealType(
845 TargetInfo::LongDouble);
846 }
847 }
848
849 return false;
850 }
851
852 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
853 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
854 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
855 if (BT->getKind() == BuiltinType::LongDouble)
856 return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
857 }
858 }
859
860 return false;
861 }
862
863 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
864 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
865 return GetFunctionType(FI);
866 }
867
868 llvm::FunctionType *
869 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
870
871 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
872 assert(Inserted && "Recursively being processed?");
873
874 SmallVector<llvm::Type*, 8> argTypes;
875 llvm::Type *resultType = 0;
876
877 const ABIArgInfo &retAI = FI.getReturnInfo();
878 switch (retAI.getKind()) {
879 case ABIArgInfo::Expand:
880 llvm_unreachable("Invalid ABI kind for return argument");
881
882 case ABIArgInfo::Extend:
883 case ABIArgInfo::Direct:
884 resultType = retAI.getCoerceToType();
885 break;
886
887 case ABIArgInfo::Indirect: {
888 assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
889 resultType = llvm::Type::getVoidTy(getLLVMContext());
890
891 QualType ret = FI.getReturnType();
892 llvm::Type *ty = ConvertType(ret);
893 unsigned addressSpace = Context.getTargetAddressSpace(ret);
894 argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
895 break;
896 }
897
898 case ABIArgInfo::Ignore:
899 resultType = llvm::Type::getVoidTy(getLLVMContext());
900 break;
901 }
902
903 // Add in all of the required arguments.
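// Only the required arguments get explicit IR parameter types; for a variadic
// function the optional arguments travel through the '...' of the LLVM
// function type instead.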
904 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
905 if (FI.isVariadic()) {
906 ie = it + FI.getRequiredArgs().getNumRequiredArgs();
907 } else {
908 ie = FI.arg_end();
909 }
910 for (; it != ie; ++it) {
911 const ABIArgInfo &argAI = it->info;
912
913 // Insert a padding type to ensure proper alignment.
914 if (llvm::Type *PaddingType = argAI.getPaddingType())
915 argTypes.push_back(PaddingType);
916
917 switch (argAI.getKind()) {
918 case ABIArgInfo::Ignore:
919 break;
920
921 case ABIArgInfo::Indirect: {
922 // indirect arguments are always on the stack, which is addr space #0.
923 llvm::Type *LTy = ConvertTypeForMem(it->type);
924 argTypes.push_back(LTy->getPointerTo());
925 break;
926 }
927
928 case ABIArgInfo::Extend:
929 case ABIArgInfo::Direct: {
930 // If the coerce-to type is a first class aggregate, flatten it. Either
931 // way is semantically identical, but fast-isel and the optimizer
932 // generally like scalar values better than FCAs.
933 llvm::Type *argType = argAI.getCoerceToType();
934 if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
935 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
936 argTypes.push_back(st->getElementType(i));
937 } else {
938 argTypes.push_back(argType);
939 }
940 break;
941 }
942
943 case ABIArgInfo::Expand:
944 GetExpandedTypes(it->type, argTypes);
945 break;
946 }
947 }
948
949 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
950 assert(Erased && "Not in set?");
951
952 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
953 }
954
955 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
956 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
957 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
958
959 if (!isFuncTypeConvertible(FPT))
960 return llvm::StructType::get(getLLVMContext());
961
962 const CGFunctionInfo *Info;
963 if (isa<CXXDestructorDecl>(MD))
964 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
965 else
966 Info = &arrangeCXXMethodDeclaration(MD);
967 return GetFunctionType(*Info);
968 }
969
970 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
971 const Decl *TargetDecl,
972 AttributeListType &PAL,
973 unsigned &CallingConv,
974 bool AttrOnCallSite) {
975 llvm::AttrBuilder FuncAttrs;
976 llvm::AttrBuilder RetAttrs;
977
978 CallingConv = FI.getEffectiveCallingConvention();
979
980 if (FI.isNoReturn())
981 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
982
983 // FIXME: handle sseregparm someday...
984 if (TargetDecl) {
985 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
986 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
987 if (TargetDecl->hasAttr<NoThrowAttr>())
988 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
989 if (TargetDecl->hasAttr<NoReturnAttr>())
990 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
991
992 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
993 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
994 if (FPT && FPT->isNothrow(getContext()))
995 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
996 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
997 // These attributes are not inherited by overriders.
998 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
999 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1000 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1001 }
1002
1003 // 'const' and 'pure' attribute functions are also nounwind.
1004 if (TargetDecl->hasAttr<ConstAttr>()) {
1005 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1006 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1007 } else if (TargetDecl->hasAttr<PureAttr>()) {
1008 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1009 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1010 }
1011 if (TargetDecl->hasAttr<MallocAttr>())
1012 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1013 }
1014
1015 if (CodeGenOpts.OptimizeSize)
1016 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1017 if (CodeGenOpts.OptimizeSize == 2)
1018 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1019 if (CodeGenOpts.DisableRedZone)
1020 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1021 if (CodeGenOpts.NoImplicitFloat)
1022 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1023
1024 if (AttrOnCallSite) {
1025 // Attributes that should go on the call site only.
1026 if (!CodeGenOpts.SimplifyLibCalls)
1027 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1028 } else {
1029 // Attributes that should go on the function, but not the call site.
1030 if (!CodeGenOpts.DisableFPElim) {
1031 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1032 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
1033 } else if (CodeGenOpts.OmitLeafFramePointer) {
1034 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1035 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
1036 } else {
1037 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1038 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
1039 }
1040
1041 FuncAttrs.addAttribute("less-precise-fpmad",
1042 CodeGenOpts.LessPreciseFPMAD ? "true" : "false");
1043 FuncAttrs.addAttribute("no-infs-fp-math",
1044 CodeGenOpts.NoInfsFPMath ? "true" : "false");
1045 FuncAttrs.addAttribute("no-nans-fp-math",
1046 CodeGenOpts.NoNaNsFPMath ? "true" : "false");
1047 FuncAttrs.addAttribute("unsafe-fp-math",
1048 CodeGenOpts.UnsafeFPMath ? "true" : "false");
1049 FuncAttrs.addAttribute("use-soft-float",
1050 CodeGenOpts.SoftFloat ? "true" : "false");
1051 }
1052
1053 QualType RetTy = FI.getReturnType();
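// IR parameter attribute indices start at 1; the return value and the function
// itself use the special ReturnIndex and FunctionIndex slots. Index tracks the
// next parameter slot.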
1054 unsigned Index = 1;
1055 const ABIArgInfo &RetAI = FI.getReturnInfo();
1056 switch (RetAI.getKind()) {
1057 case ABIArgInfo::Extend:
1058 if (RetTy->hasSignedIntegerRepresentation())
1059 RetAttrs.addAttribute(llvm::Attribute::SExt);
1060 else if (RetTy->hasUnsignedIntegerRepresentation())
1061 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1062 break;
1063 case ABIArgInfo::Direct:
1064 case ABIArgInfo::Ignore:
1065 break;
1066
1067 case ABIArgInfo::Indirect: {
1068 llvm::AttrBuilder SRETAttrs;
1069 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1070 if (RetAI.getInReg())
1071 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1072 PAL.push_back(llvm::
1073 AttributeSet::get(getLLVMContext(), Index, SRETAttrs));
1074
1075 ++Index;
1076 // sret disables readnone and readonly
1077 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1078 .removeAttribute(llvm::Attribute::ReadNone);
1079 break;
1080 }
1081
1082 case ABIArgInfo::Expand:
1083 llvm_unreachable("Invalid ABI kind for return argument");
1084 }
1085
1086 if (RetAttrs.hasAttributes())
1087 PAL.push_back(llvm::
1088 AttributeSet::get(getLLVMContext(),
1089 llvm::AttributeSet::ReturnIndex,
1090 RetAttrs));
1091
1092 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1093 ie = FI.arg_end(); it != ie; ++it) {
1094 QualType ParamType = it->type;
1095 const ABIArgInfo &AI = it->info;
1096 llvm::AttrBuilder Attrs;
1097
1098 if (AI.getPaddingType()) {
1099 if (AI.getPaddingInReg())
1100 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
1101 llvm::Attribute::InReg));
1102 // Increment Index if there is padding.
1103 ++Index;
1104 }
1105
1106 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1107 // have the corresponding parameter variable. It doesn't make
1108 // sense to do it here because parameters are so messed up.
1109 switch (AI.getKind()) {
1110 case ABIArgInfo::Extend:
1111 if (ParamType->isSignedIntegerOrEnumerationType())
1112 Attrs.addAttribute(llvm::Attribute::SExt);
1113 else if (ParamType->isUnsignedIntegerOrEnumerationType())
1114 Attrs.addAttribute(llvm::Attribute::ZExt);
1115 // FALL THROUGH
1116 case ABIArgInfo::Direct:
1117 if (AI.getInReg())
1118 Attrs.addAttribute(llvm::Attribute::InReg);
1119
1120 // FIXME: handle sseregparm someday...
1121
1122 if (llvm::StructType *STy =
1123 dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1124 unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
1125 if (Attrs.hasAttributes())
1126 for (unsigned I = 0; I < Extra; ++I)
1127 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
1128 Attrs));
1129 Index += Extra;
1130 }
1131 break;
1132
1133 case ABIArgInfo::Indirect:
1134 if (AI.getInReg())
1135 Attrs.addAttribute(llvm::Attribute::InReg);
1136
1137 if (AI.getIndirectByVal())
1138 Attrs.addAttribute(llvm::Attribute::ByVal);
1139
1140 Attrs.addAlignmentAttr(AI.getIndirectAlign());
1141
1142 // byval disables readnone and readonly.
1143 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1144 .removeAttribute(llvm::Attribute::ReadNone);
1145 break;
1146
1147 case ABIArgInfo::Ignore:
1148 // Skip increment, no matching LLVM parameter.
1149 continue;
1150
1151 case ABIArgInfo::Expand: {
1152 SmallVector<llvm::Type*, 8> types;
1153 // FIXME: This is rather inefficient. Do we ever actually need to do
1154 // anything here? The result should be just reconstructed on the other
1155 // side, so extension should be a non-issue.
1156 getTypes().GetExpandedTypes(ParamType, types);
1157 Index += types.size();
1158 continue;
1159 }
1160 }
1161
1162 if (Attrs.hasAttributes())
1163 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1164 ++Index;
1165 }
1166 if (FuncAttrs.hasAttributes())
1167 PAL.push_back(llvm::
1168 AttributeSet::get(getLLVMContext(),
1169 llvm::AttributeSet::FunctionIndex,
1170 FuncAttrs));
1171 }
1172
1173 /// An argument came in as a promoted argument; demote it back to its
1174 /// declared type.
1175 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1176 const VarDecl *var,
1177 llvm::Value *value) {
1178 llvm::Type *varType = CGF.ConvertType(var->getType());
1179
1180 // This can happen with promotions that actually don't change the
1181 // underlying type, like the enum promotions.
1182 if (value->getType() == varType) return value;
1183
1184 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1185 && "unexpected promotion type");
1186
1187 if (isa<llvm::IntegerType>(varType))
1188 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1189
1190 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1191 }
1192
1193 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1194 llvm::Function *Fn,
1195 const FunctionArgList &Args) {
1196 // If this is an implicit-return-zero function, go ahead and
1197 // initialize the return value. TODO: it might be nice to have
1198 // a more general mechanism for this that didn't require synthesized
1199 // return statements.
1200 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
1201 if (FD->hasImplicitReturnZero()) {
1202 QualType RetTy = FD->getResultType().getUnqualifiedType();
1203 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1204 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1205 Builder.CreateStore(Zero, ReturnValue);
1206 }
1207 }
1208
1209 // FIXME: We no longer need the types from FunctionArgList; lift up and
1210 // simplify.
1211
1212 // Emit allocs for param decls. Give the LLVM Argument nodes names.
1213 llvm::Function::arg_iterator AI = Fn->arg_begin();
1214
1215 // Name the struct return argument.
1216 if (CGM.ReturnTypeUsesSRet(FI)) {
1217 AI->setName("agg.result");
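// The callee's return slot is not accessible through any other pointer during
// the call, which is why it is safe to add the noalias attribute here.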
1218 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1219 AI->getArgNo() + 1,
1220 llvm::Attribute::NoAlias));
1221 ++AI;
1222 }
1223
1224 assert(FI.arg_size() == Args.size() &&
1225 "Mismatch between function signature & arguments.");
1226 unsigned ArgNo = 1;
1227 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1228 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1229 i != e; ++i, ++info_it, ++ArgNo) {
1230 const VarDecl *Arg = *i;
1231 QualType Ty = info_it->type;
1232 const ABIArgInfo &ArgI = info_it->info;
1233
1234 bool isPromoted =
1235 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1236
1237 // Skip the dummy padding argument.
1238 if (ArgI.getPaddingType())
1239 ++AI;
1240
1241 switch (ArgI.getKind()) {
1242 case ABIArgInfo::Indirect: {
1243 llvm::Value *V = AI;
1244
1245 if (!hasScalarEvaluationKind(Ty)) {
1246 // Aggregates and complex variables are accessed by reference. All we
1247 // need to do is realign the value, if requested
1248 if (ArgI.getIndirectRealign()) {
1249 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1250
1251 // Copy from the incoming argument pointer to the temporary with the
1252 // appropriate alignment.
1253 //
1254 // FIXME: We should have a common utility for generating an aggregate
1255 // copy.
1256 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1257 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1258 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1259 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1260 Builder.CreateMemCpy(Dst,
1261 Src,
1262 llvm::ConstantInt::get(IntPtrTy,
1263 Size.getQuantity()),
1264 ArgI.getIndirectAlign(),
1265 false);
1266 V = AlignedTemp;
1267 }
1268 } else {
1269 // Load scalar value from indirect argument.
1270 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1271 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1272
1273 if (isPromoted)
1274 V = emitArgumentDemotion(*this, Arg, V);
1275 }
1276 EmitParmDecl(*Arg, V, ArgNo);
1277 break;
1278 }
1279
1280 case ABIArgInfo::Extend:
1281 case ABIArgInfo::Direct: {
1282
1283 // If we have the trivial case, handle it with no muss and fuss.
1284 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1285 ArgI.getCoerceToType() == ConvertType(Ty) &&
1286 ArgI.getDirectOffset() == 0) {
1287 assert(AI != Fn->arg_end() && "Argument mismatch!");
1288 llvm::Value *V = AI;
1289
1290 if (Arg->getType().isRestrictQualified())
1291 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1292 AI->getArgNo() + 1,
1293 llvm::Attribute::NoAlias));
1294
1295 // Ensure the argument is the correct type.
1296 if (V->getType() != ArgI.getCoerceToType())
1297 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1298
1299 if (isPromoted)
1300 V = emitArgumentDemotion(*this, Arg, V);
1301
1302 // Because of merging of function types from multiple decls it is
1303 // possible for the type of an argument to not match the corresponding
1304 // type in the function type. Since we are codegening the callee
1305 // in here, add a cast to the argument type.
1306 llvm::Type *LTy = ConvertType(Arg->getType());
1307 if (V->getType() != LTy)
1308 V = Builder.CreateBitCast(V, LTy);
1309
1310 EmitParmDecl(*Arg, V, ArgNo);
1311 break;
1312 }
1313
1314 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1315
1316 // The alignment we need to use is the max of the requested alignment for
1317 // the argument and the alignment required by our access code below.
1318 unsigned AlignmentToUse =
1319 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1320 AlignmentToUse = std::max(AlignmentToUse,
1321 (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1322
1323 Alloca->setAlignment(AlignmentToUse);
1324 llvm::Value *V = Alloca;
1325 llvm::Value *Ptr = V; // Pointer to store into.
1326
1327 // If the value is offset in memory, apply the offset now.
1328 if (unsigned Offs = ArgI.getDirectOffset()) {
1329 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1330 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1331 Ptr = Builder.CreateBitCast(Ptr,
1332 llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1333 }
1334
1335 // If the coerce-to type is a first class aggregate, we flatten it and
1336 // pass the elements. Either way is semantically identical, but fast-isel
1337 // and the optimizer generally like scalar values better than FCAs.
1338 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1339 if (STy && STy->getNumElements() > 1) {
1340 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1341 llvm::Type *DstTy =
1342 cast<llvm::PointerType>(Ptr->getType())->getElementType();
1343 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1344
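// If the flattened pieces fit in the parameter's memory slot, store them
// straight in; otherwise build the coerced value in a temporary of the right
// size and memcpy over just the part that fits.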
1345 if (SrcSize <= DstSize) {
1346 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1347
1348 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1349 assert(AI != Fn->arg_end() && "Argument mismatch!");
1350 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1351 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1352 Builder.CreateStore(AI++, EltPtr);
1353 }
1354 } else {
1355 llvm::AllocaInst *TempAlloca =
1356 CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1357 TempAlloca->setAlignment(AlignmentToUse);
1358 llvm::Value *TempV = TempAlloca;
1359
1360 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1361 assert(AI != Fn->arg_end() && "Argument mismatch!");
1362 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1363 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1364 Builder.CreateStore(AI++, EltPtr);
1365 }
1366
1367 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1368 }
1369 } else {
1370 // Simple case, just do a coerced store of the argument into the alloca.
1371 assert(AI != Fn->arg_end() && "Argument mismatch!");
1372 AI->setName(Arg->getName() + ".coerce");
1373 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1374 }
1375
1376
1377 // Match to what EmitParmDecl is expecting for this type.
1378 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1379 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1380 if (isPromoted)
1381 V = emitArgumentDemotion(*this, Arg, V);
1382 }
1383 EmitParmDecl(*Arg, V, ArgNo);
1384 continue; // Skip ++AI increment, already done.
1385 }
1386
1387 case ABIArgInfo::Expand: {
1388 // If this structure was expanded into multiple arguments then
1389 // we need to create a temporary and reconstruct it from the
1390 // arguments.
1391 llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1392 CharUnits Align = getContext().getDeclAlign(Arg);
1393 Alloca->setAlignment(Align.getQuantity());
1394 LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1395 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1396 EmitParmDecl(*Arg, Alloca, ArgNo);
1397
1398 // Name the arguments used in expansion and increment AI.
1399 unsigned Index = 0;
1400 for (; AI != End; ++AI, ++Index)
1401 AI->setName(Arg->getName() + "." + Twine(Index));
1402 continue;
1403 }
1404
1405 case ABIArgInfo::Ignore:
1406 // Initialize the local variable appropriately.
1407 if (!hasScalarEvaluationKind(Ty))
1408 EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1409 else
1410 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1411 ArgNo);
1412
1413 // Skip increment, no matching LLVM parameter.
1414 continue;
1415 }
1416
1417 ++AI;
1418 }
1419 assert(AI == Fn->arg_end() && "Argument mismatch!");
1420 }
1421
1422 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1423 while (insn->use_empty()) {
1424 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1425 if (!bitcast) return;
1426
1427 // This is "safe" because we would have used a ConstantExpr otherwise.
1428 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1429 bitcast->eraseFromParent();
1430 }
1431 }
1432
1433 /// Try to emit a fused autorelease of a return result.
1434 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1435 llvm::Value *result) {
1436   // The insertion point must immediately follow the cast.
1437 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1438 if (BB->empty()) return 0;
1439 if (&BB->back() != result) return 0;
1440
1441 llvm::Type *resultType = result->getType();
1442
1443 // result is in a BasicBlock and is therefore an Instruction.
1444 llvm::Instruction *generator = cast<llvm::Instruction>(result);
1445
1446 SmallVector<llvm::Instruction*,4> insnsToKill;
1447
1448 // Look for:
1449 // %generator = bitcast %type1* %generator2 to %type2*
1450 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1451 // We would have emitted this as a constant if the operand weren't
1452 // an Instruction.
1453 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1454
1455 // Require the generator to be immediately followed by the cast.
1456 if (generator->getNextNode() != bitcast)
1457 return 0;
1458
1459 insnsToKill.push_back(bitcast);
1460 }
1461
1462 // Look for:
1463 // %generator = call i8* @objc_retain(i8* %originalResult)
1464 // or
1465 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1466 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1467 if (!call) return 0;
1468
1469 bool doRetainAutorelease;
1470
1471 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1472 doRetainAutorelease = true;
1473 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1474 .objc_retainAutoreleasedReturnValue) {
1475 doRetainAutorelease = false;
1476
1477 // If we emitted an assembly marker for this call (and the
1478 // ARCEntrypoints field should have been set if so), go looking
1479 // for that call. If we can't find it, we can't do this
1480 // optimization. But it should always be the immediately previous
1481 // instruction, unless we needed bitcasts around the call.
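  // Illustrative shape of the sequence we expect to find when a marker was
  // emitted (the marker string is target-specific, e.g. "mov r7, r7" on ARM):
  //   call void asm sideeffect "mov r7, r7", ""()
  //   %retained = call i8* @objc_retainAutoreleasedReturnValue(i8* %ret)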
1482 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1483 llvm::Instruction *prev = call->getPrevNode();
1484 assert(prev);
1485 if (isa<llvm::BitCastInst>(prev)) {
1486 prev = prev->getPrevNode();
1487 assert(prev);
1488 }
1489 assert(isa<llvm::CallInst>(prev));
1490 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1491 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1492 insnsToKill.push_back(prev);
1493 }
1494 } else {
1495 return 0;
1496 }
1497
1498 result = call->getArgOperand(0);
1499 insnsToKill.push_back(call);
1500
1501 // Keep killing bitcasts, for sanity. Note that we no longer care
1502 // about precise ordering as long as there's exactly one use.
1503 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1504 if (!bitcast->hasOneUse()) break;
1505 insnsToKill.push_back(bitcast);
1506 result = bitcast->getOperand(0);
1507 }
1508
1509 // Delete all the unnecessary instructions, from latest to earliest.
1510 for (SmallVectorImpl<llvm::Instruction*>::iterator
1511 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1512 (*i)->eraseFromParent();
1513
1514 // Do the fused retain/autorelease if we were asked to.
1515 if (doRetainAutorelease)
1516 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1517
1518 // Cast back to the result type.
1519 return CGF.Builder.CreateBitCast(result, resultType);
1520 }
1521
1522 /// If this is a +1 of the value of an immutable 'self', remove it.
1523 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1524 llvm::Value *result) {
1525 // This is only applicable to a method with an immutable 'self'.
1526 const ObjCMethodDecl *method =
1527 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1528 if (!method) return 0;
1529 const VarDecl *self = method->getSelfDecl();
1530 if (!self->getType().isConstQualified()) return 0;
1531
1532 // Look for a retain call.
1533 llvm::CallInst *retainCall =
1534 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1535 if (!retainCall ||
1536 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1537 return 0;
1538
1539 // Look for an ordinary load of 'self'.
1540 llvm::Value *retainedValue = retainCall->getArgOperand(0);
1541 llvm::LoadInst *load =
1542 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1543 if (!load || load->isAtomic() || load->isVolatile() ||
1544 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1545 return 0;
1546
1547 // Okay! Burn it all down. This relies for correctness on the
1548 // assumption that the retain is emitted as part of the return and
1549 // that thereafter everything is used "linearly".
1550 llvm::Type *resultType = result->getType();
1551 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1552 assert(retainCall->use_empty());
1553 retainCall->eraseFromParent();
1554 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1555
1556 return CGF.Builder.CreateBitCast(load, resultType);
1557 }
1558
1559 /// Emit an ARC autorelease of the result of a function.
1560 ///
1561 /// \return the value to actually return from the function
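/// For example (illustrative), when fused ARC calls are allowed, a trailing
///   %1 = call i8* @objc_retain(i8* %0)
/// immediately before the return is rewritten into a single
///   %1 = call i8* @objc_retainAutoreleaseReturnValue(i8* %0)
/// rather than emitting a separate autorelease of the result.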
1562 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1563 llvm::Value *result) {
1564 // If we're returning 'self', kill the initial retain. This is a
1565 // heuristic attempt to "encourage correctness" in the really unfortunate
1566 // case where we have a return of self during a dealloc and we desperately
1567 // need to avoid the possible autorelease.
1568 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1569 return self;
1570
1571 // At -O0, try to emit a fused retain/autorelease.
1572 if (CGF.shouldUseFusedARCCalls())
1573 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1574 return fused;
1575
1576 return CGF.EmitARCAutoreleaseReturnValue(result);
1577 }
1578
1579 /// Heuristically search for a dominating store to the return-value slot.
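/// The typical single-use pattern this finds is (illustrative):
///   store i32 %result, i32* %retval
///   br label %return
/// with the builder's insertion point in %return, so walking up the
/// single-predecessor chain from the insertion point reaches the store's block.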
1580 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1581 // If there are multiple uses of the return-value slot, just check
1582 // for something immediately preceding the IP. Sometimes this can
1583 // happen with how we generate implicit-returns; it can also happen
1584 // with noreturn cleanups.
1585 if (!CGF.ReturnValue->hasOneUse()) {
1586 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1587 if (IP->empty()) return 0;
1588 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1589 if (!store) return 0;
1590 if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1591 assert(!store->isAtomic() && !store->isVolatile()); // see below
1592 return store;
1593 }
1594
1595 llvm::StoreInst *store =
1596 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1597 if (!store) return 0;
1598
1599 // These aren't actually possible for non-coerced returns, and we
1600 // only care about non-coerced returns on this code path.
1601 assert(!store->isAtomic() && !store->isVolatile());
1602
1603   // Now do a quick-and-dirty dominance check: just walk up the
1604   // single-predecessor chain from the current insertion point.
1605 llvm::BasicBlock *StoreBB = store->getParent();
1606 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1607 while (IP != StoreBB) {
1608 if (!(IP = IP->getSinglePredecessor()))
1609 return 0;
1610 }
1611
1612 // Okay, the store's basic block dominates the insertion point; we
1613 // can do our thing.
1614 return store;
1615 }
1616
1617 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1618 // Functions with no result always return void.
1619 if (ReturnValue == 0) {
1620 Builder.CreateRetVoid();
1621 return;
1622 }
1623
1624 llvm::DebugLoc RetDbgLoc;
1625 llvm::Value *RV = 0;
1626 QualType RetTy = FI.getReturnType();
1627 const ABIArgInfo &RetAI = FI.getReturnInfo();
1628
1629 switch (RetAI.getKind()) {
1630 case ABIArgInfo::Indirect: {
1631 switch (getEvaluationKind(RetTy)) {
1632 case TEK_Complex: {
1633 ComplexPairTy RT =
1634 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
1635 EmitStoreOfComplex(RT,
1636 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1637 /*isInit*/ true);
1638 break;
1639 }
1640 case TEK_Aggregate:
1641       // Do nothing; aggregates get evaluated directly into the destination.
1642 break;
1643 case TEK_Scalar:
1644 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1645 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1646 /*isInit*/ true);
1647 break;
1648 }
1649 break;
1650 }
1651
1652 case ABIArgInfo::Extend:
1653 case ABIArgInfo::Direct:
1654 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1655 RetAI.getDirectOffset() == 0) {
1656       // The internal return value temp will always have pointer-to-return-type
1657       // type, so just do a load.
1658
1659 // If there is a dominating store to ReturnValue, we can elide
1660 // the load, zap the store, and usually zap the alloca.
1661 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1662 // Get the stored value and nuke the now-dead store.
1663 RetDbgLoc = SI->getDebugLoc();
1664 RV = SI->getValueOperand();
1665 SI->eraseFromParent();
1666
1667 // If that was the only use of the return value, nuke it as well now.
1668 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1669 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1670 ReturnValue = 0;
1671 }
1672
1673 // Otherwise, we have to do a simple load.
1674 } else {
1675 RV = Builder.CreateLoad(ReturnValue);
1676 }
1677 } else {
1678 llvm::Value *V = ReturnValue;
1679 // If the value is offset in memory, apply the offset now.
1680 if (unsigned Offs = RetAI.getDirectOffset()) {
1681 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1682 V = Builder.CreateConstGEP1_32(V, Offs);
1683 V = Builder.CreateBitCast(V,
1684 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1685 }
1686
1687 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1688 }
1689
1690 // In ARC, end functions that return a retainable type with a call
1691 // to objc_autoreleaseReturnValue.
1692 if (AutoreleaseResult) {
1693 assert(getLangOpts().ObjCAutoRefCount &&
1694 !FI.isReturnsRetained() &&
1695 RetTy->isObjCRetainableType());
1696 RV = emitAutoreleaseOfResult(*this, RV);
1697 }
1698
1699 break;
1700
1701 case ABIArgInfo::Ignore:
1702 break;
1703
1704 case ABIArgInfo::Expand:
1705 llvm_unreachable("Invalid ABI kind for return argument");
1706 }
1707
1708 llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1709 if (!RetDbgLoc.isUnknown())
1710 Ret->setDebugLoc(RetDbgLoc);
1711 }
1712
1713 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1714 const VarDecl *param) {
1715 // StartFunction converted the ABI-lowered parameter(s) into a
1716 // local alloca. We need to turn that into an r-value suitable
1717 // for EmitCall.
1718 llvm::Value *local = GetAddrOfLocalVar(param);
1719
1720 QualType type = param->getType();
1721
1722 // For the most part, we just need to load the alloca, except:
1723 // 1) aggregate r-values are actually pointers to temporaries, and
1724 // 2) references to non-scalars are pointers directly to the aggregate.
1725 // I don't know why references to scalars are different here.
1726 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1727 if (!hasScalarEvaluationKind(ref->getPointeeType()))
1728 return args.add(RValue::getAggregate(local), type);
1729
1730 // Locals which are references to scalars are represented
1731 // with allocas holding the pointer.
1732 return args.add(RValue::get(Builder.CreateLoad(local)), type);
1733 }
1734
1735 args.add(convertTempToRValue(local, type), type);
1736 }
1737
1738 static bool isProvablyNull(llvm::Value *addr) {
1739 return isa<llvm::ConstantPointerNull>(addr);
1740 }
1741
1742 static bool isProvablyNonNull(llvm::Value *addr) {
1743 return isa<llvm::AllocaInst>(addr);
1744 }
1745
1746 /// Emit the actual writing-back of a writeback.
1747 static void emitWriteback(CodeGenFunction &CGF,
1748 const CallArgList::Writeback &writeback) {
1749 llvm::Value *srcAddr = writeback.Address;
1750 assert(!isProvablyNull(srcAddr) &&
1751 "shouldn't have writeback for provably null argument");
1752
1753 llvm::BasicBlock *contBB = 0;
1754
1755 // If the argument wasn't provably non-null, we need to null check
1756 // before doing the store.
1757 bool provablyNonNull = isProvablyNonNull(srcAddr);
1758 if (!provablyNonNull) {
1759 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1760 contBB = CGF.createBasicBlock("icr.done");
1761
1762 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1763 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1764 CGF.EmitBlock(writebackBB);
1765 }
1766
1767 // Load the value to writeback.
1768 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1769
1770 // Cast it back, in case we're writing an id to a Foo* or something.
1771 value = CGF.Builder.CreateBitCast(value,
1772 cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1773 "icr.writeback-cast");
1774
1775 // Perform the writeback.
1776 QualType srcAddrType = writeback.AddressType;
1777 CGF.EmitStoreThroughLValue(RValue::get(value),
1778 CGF.MakeAddrLValue(srcAddr, srcAddrType));
1779
1780 // Jump to the continuation block.
1781 if (!provablyNonNull)
1782 CGF.EmitBlock(contBB);
1783 }
1784
1785 static void emitWritebacks(CodeGenFunction &CGF,
1786 const CallArgList &args) {
1787 for (CallArgList::writeback_iterator
1788 i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1789 emitWriteback(CGF, *i);
1790 }
1791
1792 /// Emit an argument that's being passed call-by-writeback. That is,
1793 /// we are passing the address of a temporary that is written back afterwards.
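/// A typical source-level example (illustrative; the selector is hypothetical)
/// is passing the address of a __strong local to an 'id __autoreleasing *'
/// parameter such as NSError**:
///   NSError *err = nil;
///   [obj doSomethingReturningError:&err];
/// The callee receives the address of a temporary, and the temporary's value
/// is copied back into 'err' once the call returns.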
1794 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1795 const ObjCIndirectCopyRestoreExpr *CRE) {
1796 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1797
1798 // The dest and src types don't necessarily match in LLVM terms
1799 // because of the crazy ObjC compatibility rules.
1800
1801 llvm::PointerType *destType =
1802 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1803
1804 // If the address is a constant null, just pass the appropriate null.
1805 if (isProvablyNull(srcAddr)) {
1806 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1807 CRE->getType());
1808 return;
1809 }
1810
1811 QualType srcAddrType =
1812 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1813
1814 // Create the temporary.
1815 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1816 "icr.temp");
1817 // Loading an l-value can introduce a cleanup if the l-value is __weak,
1818 // and that cleanup will be conditional if we can't prove that the l-value
1819 // isn't null, so we need to register a dominating point so that the cleanups
1820 // system will make valid IR.
1821 CodeGenFunction::ConditionalEvaluation condEval(CGF);
1822
1823 // Zero-initialize it if we're not doing a copy-initialization.
1824 bool shouldCopy = CRE->shouldCopy();
1825 if (!shouldCopy) {
1826 llvm::Value *null =
1827 llvm::ConstantPointerNull::get(
1828 cast<llvm::PointerType>(destType->getElementType()));
1829 CGF.Builder.CreateStore(null, temp);
1830 }
1831
1832 llvm::BasicBlock *contBB = 0;
1833
1834 // If the address is *not* known to be non-null, we need to switch.
1835 llvm::Value *finalArgument;
1836
1837 bool provablyNonNull = isProvablyNonNull(srcAddr);
1838 if (provablyNonNull) {
1839 finalArgument = temp;
1840 } else {
1841 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1842
1843 finalArgument = CGF.Builder.CreateSelect(isNull,
1844 llvm::ConstantPointerNull::get(destType),
1845 temp, "icr.argument");
1846
1847 // If we need to copy, then the load has to be conditional, which
1848 // means we need control flow.
1849 if (shouldCopy) {
1850 contBB = CGF.createBasicBlock("icr.cont");
1851 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1852 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1853 CGF.EmitBlock(copyBB);
1854 condEval.begin(CGF);
1855 }
1856 }
1857
1858 // Perform a copy if necessary.
1859 if (shouldCopy) {
1860 LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1861 RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1862 assert(srcRV.isScalar());
1863
1864 llvm::Value *src = srcRV.getScalarVal();
1865 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1866 "icr.cast");
1867
1868 // Use an ordinary store, not a store-to-lvalue.
1869 CGF.Builder.CreateStore(src, temp);
1870 }
1871
1872 // Finish the control flow if we needed it.
1873 if (shouldCopy && !provablyNonNull) {
1874 CGF.EmitBlock(contBB);
1875 condEval.end(CGF);
1876 }
1877
1878 args.addWriteback(srcAddr, srcAddrType, temp);
1879 args.add(RValue::get(finalArgument), CRE->getType());
1880 }
1881
1882 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1883 QualType type) {
1884 if (const ObjCIndirectCopyRestoreExpr *CRE
1885 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1886 assert(getLangOpts().ObjCAutoRefCount);
1887 assert(getContext().hasSameType(E->getType(), type));
1888 return emitWritebackArg(*this, args, CRE);
1889 }
1890
1891 assert(type->isReferenceType() == E->isGLValue() &&
1892 "reference binding to unmaterialized r-value!");
1893
1894 if (E->isGLValue()) {
1895 assert(E->getObjectKind() == OK_Ordinary);
1896 return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1897 type);
1898 }
1899
1900 if (hasAggregateEvaluationKind(type) &&
1901 isa<ImplicitCastExpr>(E) &&
1902 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1903 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1904 assert(L.isSimple());
1905 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1906 return;
1907 }
1908
1909 args.add(EmitAnyExprToTemp(E), type);
1910 }
1911
1912 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1913 // optimizer it can aggressively ignore unwind edges.
1914 void
1915 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1916 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1917 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1918 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1919 CGM.getNoObjCARCExceptionsMetadata());
1920 }
1921
1922 /// Emits a call to the given no-arguments nounwind runtime function.
1923 llvm::CallInst *
1924 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
1925 const llvm::Twine &name) {
1926 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
1927 }
1928
1929 /// Emits a call to the given nounwind runtime function.
1930 llvm::CallInst *
1931 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
1932 ArrayRef<llvm::Value*> args,
1933 const llvm::Twine &name) {
1934 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
1935 call->setDoesNotThrow();
1936 return call;
1937 }
1938
1939 /// Emits a simple call (never an invoke) to the given no-arguments
1940 /// runtime function.
1941 llvm::CallInst *
1942 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
1943 const llvm::Twine &name) {
1944 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
1945 }
1946
1947 /// Emits a simple call (never an invoke) to the given runtime
1948 /// function.
1949 llvm::CallInst *
1950 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
1951 ArrayRef<llvm::Value*> args,
1952 const llvm::Twine &name) {
1953 llvm::CallInst *call = Builder.CreateCall(callee, args, name);
1954 call->setCallingConv(getRuntimeCC());
1955 return call;
1956 }
1957
1958 /// Emits a call or invoke to the given noreturn runtime function.
1959 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
1960 ArrayRef<llvm::Value*> args) {
1961 if (getInvokeDest()) {
1962 llvm::InvokeInst *invoke =
1963 Builder.CreateInvoke(callee,
1964 getUnreachableBlock(),
1965 getInvokeDest(),
1966 args);
1967 invoke->setDoesNotReturn();
1968 invoke->setCallingConv(getRuntimeCC());
1969 } else {
1970 llvm::CallInst *call = Builder.CreateCall(callee, args);
1971 call->setDoesNotReturn();
1972 call->setCallingConv(getRuntimeCC());
1973 Builder.CreateUnreachable();
1974 }
1975 }
1976
1977 /// Emits a call or invoke instruction to the given nullary runtime
1978 /// function.
1979 llvm::CallSite
1980 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
1981 const Twine &name) {
1982 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
1983 }
1984
1985 /// Emits a call or invoke instruction to the given runtime function.
1986 llvm::CallSite
1987 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
1988 ArrayRef<llvm::Value*> args,
1989 const Twine &name) {
1990 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
1991 callSite.setCallingConv(getRuntimeCC());
1992 return callSite;
1993 }
1994
1995 llvm::CallSite
1996 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1997 const Twine &Name) {
1998 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1999 }
2000
2001 /// Emits a call or invoke instruction to the given function, depending
2002 /// on the current state of the EH stack.
2003 llvm::CallSite
2004 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2005 ArrayRef<llvm::Value *> Args,
2006 const Twine &Name) {
2007 llvm::BasicBlock *InvokeDest = getInvokeDest();
2008
2009 llvm::Instruction *Inst;
2010 if (!InvokeDest)
2011 Inst = Builder.CreateCall(Callee, Args, Name);
2012 else {
2013 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
2014 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
2015 EmitBlock(ContBB);
2016 }
2017
2018 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2019 // optimizer it can aggressively ignore unwind edges.
2020 if (CGM.getLangOpts().ObjCAutoRefCount)
2021 AddObjCARCExceptionMetadata(Inst);
2022
2023 return Inst;
2024 }
2025
2026 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
2027 llvm::FunctionType *FTy) {
2028 if (ArgNo < FTy->getNumParams())
2029 assert(Elt->getType() == FTy->getParamType(ArgNo));
2030 else
2031 assert(FTy->isVarArg());
2032 ++ArgNo;
2033 }
2034
2035 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
2036 SmallVector<llvm::Value*,16> &Args,
2037 llvm::FunctionType *IRFuncTy) {
2038 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2039 unsigned NumElts = AT->getSize().getZExtValue();
2040 QualType EltTy = AT->getElementType();
2041 llvm::Value *Addr = RV.getAggregateAddr();
2042 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2043 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
2044 RValue EltRV = convertTempToRValue(EltAddr, EltTy);
2045 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
2046 }
2047 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2048 RecordDecl *RD = RT->getDecl();
2049 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
2050 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
2051
2052 if (RD->isUnion()) {
2053 const FieldDecl *LargestFD = 0;
2054 CharUnits UnionSize = CharUnits::Zero();
2055
2056 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2057 i != e; ++i) {
2058 const FieldDecl *FD = *i;
2059 assert(!FD->isBitField() &&
2060 "Cannot expand structure with bit-field members.");
2061 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
2062 if (UnionSize < FieldSize) {
2063 UnionSize = FieldSize;
2064 LargestFD = FD;
2065 }
2066 }
2067 if (LargestFD) {
2068 RValue FldRV = EmitRValueForField(LV, LargestFD);
2069 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
2070 }
2071 } else {
2072 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2073 i != e; ++i) {
2074 FieldDecl *FD = *i;
2075
2076 RValue FldRV = EmitRValueForField(LV, FD);
2077 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
2078 }
2079 }
2080 } else if (Ty->isAnyComplexType()) {
2081 ComplexPairTy CV = RV.getComplexVal();
2082 Args.push_back(CV.first);
2083 Args.push_back(CV.second);
2084 } else {
2085 assert(RV.isScalar() &&
2086 "Unexpected non-scalar rvalue during struct expansion.");
2087
2088 // Insert a bitcast as needed.
2089 llvm::Value *V = RV.getScalarVal();
2090 if (Args.size() < IRFuncTy->getNumParams() &&
2091 V->getType() != IRFuncTy->getParamType(Args.size()))
2092 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
2093
2094 Args.push_back(V);
2095 }
2096 }
2097
2098
2099 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2100 llvm::Value *Callee,
2101 ReturnValueSlot ReturnValue,
2102 const CallArgList &CallArgs,
2103 const Decl *TargetDecl,
2104 llvm::Instruction **callOrInvoke) {
2105 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2106 SmallVector<llvm::Value*, 16> Args;
2107
2108 // Handle struct-return functions by passing a pointer to the
2109 // location that we would like to return into.
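  // For example (illustrative; names are hypothetical), a C function returning
  // a large struct by value:
  //   struct Big f(void);
  // is commonly lowered with an sret pointer as the first IR argument:
  //   declare void @f(%struct.Big* sret)
  // and the call site passes the destination slot as Args[0] below.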
2110 QualType RetTy = CallInfo.getReturnType();
2111 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2112
2113 // IRArgNo - Keep track of the argument number in the callee we're looking at.
2114 unsigned IRArgNo = 0;
2115 llvm::FunctionType *IRFuncTy =
2116 cast<llvm::FunctionType>(
2117 cast<llvm::PointerType>(Callee->getType())->getElementType());
2118
2119   // If the call returns its result indirectly (struct return), create a
2120   // temporary alloca to hold the result, unless one is given to us.
2121 if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2122 llvm::Value *Value = ReturnValue.getValue();
2123 if (!Value)
2124 Value = CreateMemTemp(RetTy);
2125 Args.push_back(Value);
2126 checkArgMatches(Value, IRArgNo, IRFuncTy);
2127 }
2128
2129 assert(CallInfo.arg_size() == CallArgs.size() &&
2130 "Mismatch between function signature & arguments.");
2131 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2132 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2133 I != E; ++I, ++info_it) {
2134 const ABIArgInfo &ArgInfo = info_it->info;
2135 RValue RV = I->RV;
2136
2137 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
2138
2139 // Insert a padding argument to ensure proper alignment.
2140 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2141 Args.push_back(llvm::UndefValue::get(PaddingType));
2142 ++IRArgNo;
2143 }
2144
2145 switch (ArgInfo.getKind()) {
2146 case ABIArgInfo::Indirect: {
2147 if (RV.isScalar() || RV.isComplex()) {
2148 // Make a temporary alloca to pass the argument.
2149 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2150 if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2151 AI->setAlignment(ArgInfo.getIndirectAlign());
2152 Args.push_back(AI);
2153
2154 LValue argLV =
2155 MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
2156
2157 if (RV.isScalar())
2158 EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
2159 else
2160 EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
2161
2162 // Validate argument match.
2163 checkArgMatches(AI, IRArgNo, IRFuncTy);
2164 } else {
2165 // We want to avoid creating an unnecessary temporary+copy here;
2166 // however, we need one in three cases:
2167 // 1. If the argument is not byval, and we are required to copy the
2168 // source. (This case doesn't occur on any common architecture.)
2169 // 2. If the argument is byval, RV is not sufficiently aligned, and
2170 // we cannot force it to be sufficiently aligned.
2171 // 3. If the argument is byval, but RV is located in an address space
2172 // different than that of the argument (0).
2173 llvm::Value *Addr = RV.getAggregateAddr();
2174 unsigned Align = ArgInfo.getIndirectAlign();
2175 const llvm::DataLayout *TD = &CGM.getDataLayout();
2176 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
2177 const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
2178 IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
2179 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2180 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
2181 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
2182 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
2183 // Create an aligned temporary, and copy to it.
2184 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2185 if (Align > AI->getAlignment())
2186 AI->setAlignment(Align);
2187 Args.push_back(AI);
2188 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2189
2190 // Validate argument match.
2191 checkArgMatches(AI, IRArgNo, IRFuncTy);
2192 } else {
2193 // Skip the extra memcpy call.
2194 Args.push_back(Addr);
2195
2196 // Validate argument match.
2197 checkArgMatches(Addr, IRArgNo, IRFuncTy);
2198 }
2199 }
2200 break;
2201 }
2202
2203 case ABIArgInfo::Ignore:
2204 break;
2205
2206 case ABIArgInfo::Extend:
2207 case ABIArgInfo::Direct: {
2208 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2209 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2210 ArgInfo.getDirectOffset() == 0) {
2211 llvm::Value *V;
2212 if (RV.isScalar())
2213 V = RV.getScalarVal();
2214 else
2215 V = Builder.CreateLoad(RV.getAggregateAddr());
2216
2217 // If the argument doesn't match, perform a bitcast to coerce it. This
2218 // can happen due to trivial type mismatches.
2219 if (IRArgNo < IRFuncTy->getNumParams() &&
2220 V->getType() != IRFuncTy->getParamType(IRArgNo))
2221 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2222 Args.push_back(V);
2223
2224 checkArgMatches(V, IRArgNo, IRFuncTy);
2225 break;
2226 }
2227
2228 // FIXME: Avoid the conversion through memory if possible.
2229 llvm::Value *SrcPtr;
2230 if (RV.isScalar() || RV.isComplex()) {
2231 SrcPtr = CreateMemTemp(I->Ty, "coerce");
2232 LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2233 if (RV.isScalar()) {
2234 EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
2235 } else {
2236 EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
2237 }
2238 } else
2239 SrcPtr = RV.getAggregateAddr();
2240
2241 // If the value is offset in memory, apply the offset now.
2242 if (unsigned Offs = ArgInfo.getDirectOffset()) {
2243 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2244 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2245 SrcPtr = Builder.CreateBitCast(SrcPtr,
2246 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2247
2248 }
2249
2250 // If the coerce-to type is a first class aggregate, we flatten it and
2251 // pass the elements. Either way is semantically identical, but fast-isel
2252       // and the optimizer generally like scalar values better than FCAs.
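      // Illustrative lowering (not emitted verbatim) when the coerce-to type is
      // { double, double }: the two elements are loaded separately and passed
      // as two scalar IR arguments:
      //   %a = load double* %elt0, align 1
      //   %b = load double* %elt1, align 1
      //   call void @callee(double %a, double %b)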
2253 if (llvm::StructType *STy =
2254 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2255 llvm::Type *SrcTy =
2256 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2257 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2258 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2259
2260 // If the source type is smaller than the destination type of the
2261 // coerce-to logic, copy the source value into a temp alloca the size
2262 // of the destination type to allow loading all of it. The bits past
2263 // the source value are left undef.
2264 if (SrcSize < DstSize) {
2265 llvm::AllocaInst *TempAlloca
2266 = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2267 Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2268 SrcPtr = TempAlloca;
2269 } else {
2270 SrcPtr = Builder.CreateBitCast(SrcPtr,
2271 llvm::PointerType::getUnqual(STy));
2272 }
2273
2274 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2275 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2276 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2277 // We don't know what we're loading from.
2278 LI->setAlignment(1);
2279 Args.push_back(LI);
2280
2281 // Validate argument match.
2282 checkArgMatches(LI, IRArgNo, IRFuncTy);
2283 }
2284 } else {
2285 // In the simple case, just pass the coerced loaded value.
2286 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2287 *this));
2288
2289 // Validate argument match.
2290 checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2291 }
2292
2293 break;
2294 }
2295
2296 case ABIArgInfo::Expand:
2297 ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2298 IRArgNo = Args.size();
2299 break;
2300 }
2301 }
2302
2303 // If the callee is a bitcast of a function to a varargs pointer to function
2304 // type, check to see if we can remove the bitcast. This handles some cases
2305 // with unprototyped functions.
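  // Illustrative case (hypothetical function @f): if the callee expression is
  //   bitcast (i32 (i32)* @f to i32 (i32, ...)*)
  // and we are passing exactly one i32, the cast can be dropped and @f called
  // directly, which also lets always_inline take effect at -O0.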
2306 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2307 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2308 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2309 llvm::FunctionType *CurFT =
2310 cast<llvm::FunctionType>(CurPT->getElementType());
2311 llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2312
2313 if (CE->getOpcode() == llvm::Instruction::BitCast &&
2314 ActualFT->getReturnType() == CurFT->getReturnType() &&
2315 ActualFT->getNumParams() == CurFT->getNumParams() &&
2316 ActualFT->getNumParams() == Args.size() &&
2317 (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2318 bool ArgsMatch = true;
2319 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2320 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2321 ArgsMatch = false;
2322 break;
2323 }
2324
2325 // Strip the cast if we can get away with it. This is a nice cleanup,
2326 // but also allows us to inline the function at -O0 if it is marked
2327 // always_inline.
2328 if (ArgsMatch)
2329 Callee = CalleeF;
2330 }
2331 }
2332
2333 unsigned CallingConv;
2334 CodeGen::AttributeListType AttributeList;
2335 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2336 CallingConv, true);
2337 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2338 AttributeList);
2339
2340 llvm::BasicBlock *InvokeDest = 0;
2341 if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2342 llvm::Attribute::NoUnwind))
2343 InvokeDest = getInvokeDest();
2344
2345 llvm::CallSite CS;
2346 if (!InvokeDest) {
2347 CS = Builder.CreateCall(Callee, Args);
2348 } else {
2349 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2350 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2351 EmitBlock(Cont);
2352 }
2353 if (callOrInvoke)
2354 *callOrInvoke = CS.getInstruction();
2355
2356 CS.setAttributes(Attrs);
2357 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2358
2359 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2360 // optimizer it can aggressively ignore unwind edges.
2361 if (CGM.getLangOpts().ObjCAutoRefCount)
2362 AddObjCARCExceptionMetadata(CS.getInstruction());
2363
2364 // If the call doesn't return, finish the basic block and clear the
2365 // insertion point; this allows the rest of IRgen to discard
2366 // unreachable code.
2367 if (CS.doesNotReturn()) {
2368 Builder.CreateUnreachable();
2369 Builder.ClearInsertionPoint();
2370
2371 // FIXME: For now, emit a dummy basic block because expr emitters in
2372     // general are not ready to handle emitting expressions at unreachable
2373 // points.
2374 EnsureInsertPoint();
2375
2376 // Return a reasonable RValue.
2377 return GetUndefRValue(RetTy);
2378 }
2379
2380 llvm::Instruction *CI = CS.getInstruction();
2381 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2382 CI->setName("call");
2383
2384 // Emit any writebacks immediately. Arguably this should happen
2385 // after any return-value munging.
2386 if (CallArgs.hasWritebacks())
2387 emitWritebacks(*this, CallArgs);
2388
2389 switch (RetAI.getKind()) {
2390 case ABIArgInfo::Indirect:
2391 return convertTempToRValue(Args[0], RetTy);
2392
2393 case ABIArgInfo::Ignore:
2394 // If we are ignoring an argument that had a result, make sure to
2395 // construct the appropriate return value for our caller.
2396 return GetUndefRValue(RetTy);
2397
2398 case ABIArgInfo::Extend:
2399 case ABIArgInfo::Direct: {
2400 llvm::Type *RetIRTy = ConvertType(RetTy);
2401 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2402 switch (getEvaluationKind(RetTy)) {
2403 case TEK_Complex: {
2404 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2405 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2406 return RValue::getComplex(std::make_pair(Real, Imag));
2407 }
2408 case TEK_Aggregate: {
2409 llvm::Value *DestPtr = ReturnValue.getValue();
2410 bool DestIsVolatile = ReturnValue.isVolatile();
2411
2412 if (!DestPtr) {
2413 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2414 DestIsVolatile = false;
2415 }
2416 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2417 return RValue::getAggregate(DestPtr);
2418 }
2419 case TEK_Scalar: {
2420       // If the call result doesn't match the expected return type, perform a
2421       // bitcast to coerce it. This can happen due to trivial type mismatches.
2422 llvm::Value *V = CI;
2423 if (V->getType() != RetIRTy)
2424 V = Builder.CreateBitCast(V, RetIRTy);
2425 return RValue::get(V);
2426 }
2427 }
2428 llvm_unreachable("bad evaluation kind");
2429 }
2430
2431 llvm::Value *DestPtr = ReturnValue.getValue();
2432 bool DestIsVolatile = ReturnValue.isVolatile();
2433
2434 if (!DestPtr) {
2435 DestPtr = CreateMemTemp(RetTy, "coerce");
2436 DestIsVolatile = false;
2437 }
2438
2439 // If the value is offset in memory, apply the offset now.
2440 llvm::Value *StorePtr = DestPtr;
2441 if (unsigned Offs = RetAI.getDirectOffset()) {
2442 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2443 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2444 StorePtr = Builder.CreateBitCast(StorePtr,
2445 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2446 }
2447 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2448
2449 return convertTempToRValue(DestPtr, RetTy);
2450 }
2451
2452 case ABIArgInfo::Expand:
2453 llvm_unreachable("Invalid ABI kind for return argument");
2454 }
2455
2456 llvm_unreachable("Unhandled ABIArgInfo::Kind");
2457 }
2458
2459 /* VarArg handling */
2460
2461 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2462 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2463 }
2464