1 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "CGCall.h"
16 #include "CGCXXABI.h"
17 #include "ABIInfo.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/Basic/TargetInfo.h"
22 #include "clang/AST/Decl.h"
23 #include "clang/AST/DeclCXX.h"
24 #include "clang/AST/DeclObjC.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Attributes.h"
27 #include "llvm/Support/CallSite.h"
28 #include "llvm/Target/TargetData.h"
29 #include "llvm/InlineAsm.h"
30 #include "llvm/Transforms/Utils/Local.h"
31 using namespace clang;
32 using namespace CodeGen;
33
34 /***/
35
36 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
37 switch (CC) {
38 default: return llvm::CallingConv::C;
39 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
40 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
41 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
42 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
43 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
44 // TODO: add support for CC_X86Pascal to llvm
45 }
46 }
47
48 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
49 /// qualification.
50 /// FIXME: address space qualification?
51 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
52 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
53 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
54 }
55
56 /// Returns the canonical formal type of the given C++ method.
57 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
58 return MD->getType()->getCanonicalTypeUnqualified()
59 .getAs<FunctionProtoType>();
60 }
61
62 /// Returns the "extra-canonicalized" return type, which discards
63 /// qualifiers on the return type. Codegen doesn't care about them,
64 /// and it makes ABI code a little easier to be able to assume that
65 /// all parameter and return types are top-level unqualified.
66 static CanQualType GetReturnType(QualType RetTy) {
67 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
68 }
69
70 /// Arrange the argument and result information for a value of the
71 /// given unprototyped function type.
72 const CGFunctionInfo &
73 CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
74 // When translating an unprototyped function type, always use a
75 // variadic type.
76 return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
77 ArrayRef<CanQualType>(),
78 FTNP->getExtInfo(),
79 RequiredArgs(0));
80 }
81
82 /// Arrange the argument and result information for a value of the
83 /// given function type, on top of any implicit parameters already
84 /// stored.
85 static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
86 SmallVectorImpl<CanQualType> &argTypes,
87 CanQual<FunctionProtoType> FTP) {
88 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
89 // FIXME: Kill copy.
90 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
91 argTypes.push_back(FTP->getArgType(i));
92 CanQualType resultType = FTP->getResultType().getUnqualifiedType();
93 return CGT.arrangeFunctionType(resultType, argTypes,
94 FTP->getExtInfo(), required);
95 }
96
97 /// Arrange the argument and result information for a value of the
98 /// given function type.
99 const CGFunctionInfo &
100 CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
101 SmallVector<CanQualType, 16> argTypes;
102 return ::arrangeFunctionType(*this, argTypes, FTP);
103 }
104
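/// Map a declaration's calling-convention attributes (stdcall, fastcall,
/// thiscall, pascal, pcs) to the corresponding AST calling convention,
/// defaulting to the C convention when no attribute is present.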
105 static CallingConv getCallingConventionForDecl(const Decl *D) {
106 // Set the appropriate calling convention for the Function.
107 if (D->hasAttr<StdCallAttr>())
108 return CC_X86StdCall;
109
110 if (D->hasAttr<FastCallAttr>())
111 return CC_X86FastCall;
112
113 if (D->hasAttr<ThisCallAttr>())
114 return CC_X86ThisCall;
115
116 if (D->hasAttr<PascalAttr>())
117 return CC_X86Pascal;
118
119 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
120 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
121
122 return CC_C;
123 }
124
125 /// Arrange the argument and result information for a call to an
126 /// unknown C++ non-static member function of the given abstract type.
127 /// The member function must be an ordinary function, i.e. not a
128 /// constructor or destructor.
129 const CGFunctionInfo &
130 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
131 const FunctionProtoType *FTP) {
132 SmallVector<CanQualType, 16> argTypes;
133
134 // Add the 'this' pointer.
135 argTypes.push_back(GetThisType(Context, RD));
136
137 return ::arrangeFunctionType(*this, argTypes,
138 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
139 }
140
141 /// Arrange the argument and result information for a declaration or
142 /// definition of the given C++ non-static member function. The
143 /// member function must be an ordinary function, i.e. not a
144 /// constructor or destructor.
145 const CGFunctionInfo &
146 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
147 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
148 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
149
150 CanQual<FunctionProtoType> prototype = GetFormalType(MD);
151
152 if (MD->isInstance()) {
153 // The abstract case is perfectly fine.
154 return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
155 }
156
157 return arrangeFunctionType(prototype);
158 }
159
160 /// Arrange the argument and result information for a declaration
161 /// or definition to the given constructor variant.
162 const CGFunctionInfo &
163 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
164 CXXCtorType ctorKind) {
165 SmallVector<CanQualType, 16> argTypes;
166 argTypes.push_back(GetThisType(Context, D->getParent()));
167 CanQualType resultType = Context.VoidTy;
168
169 TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
170
171 CanQual<FunctionProtoType> FTP = GetFormalType(D);
172
173 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
174
175 // Add the formal parameters.
176 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
177 argTypes.push_back(FTP->getArgType(i));
178
179 return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
180 }
181
182 /// Arrange the argument and result information for a declaration,
183 /// definition, or call to the given destructor variant. It so
184 /// happens that all three cases produce the same information.
185 const CGFunctionInfo &
186 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
187 CXXDtorType dtorKind) {
188 SmallVector<CanQualType, 2> argTypes;
189 argTypes.push_back(GetThisType(Context, D->getParent()));
190 CanQualType resultType = Context.VoidTy;
191
192 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
193
194 CanQual<FunctionProtoType> FTP = GetFormalType(D);
195 assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
196
197 return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
198 RequiredArgs::All);
199 }
200
201 /// Arrange the argument and result information for the declaration or
202 /// definition of the given function.
203 const CGFunctionInfo &
204 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
205 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
206 if (MD->isInstance())
207 return arrangeCXXMethodDeclaration(MD);
208
209 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
210
211 assert(isa<FunctionType>(FTy));
212
213 // When declaring a function without a prototype, always use a
214 // non-variadic type.
215 if (isa<FunctionNoProtoType>(FTy)) {
216 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
217 return arrangeFunctionType(noProto->getResultType(),
218 ArrayRef<CanQualType>(),
219 noProto->getExtInfo(),
220 RequiredArgs::All);
221 }
222
223 assert(isa<FunctionProtoType>(FTy));
224 return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
225 }
226
227 /// Arrange the argument and result information for the declaration or
228 /// definition of an Objective-C method.
229 const CGFunctionInfo &
230 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
231 // It happens that this is the same as a call with no optional
232 // arguments, except also using the formal 'self' type.
233 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
234 }
235
236 /// Arrange the argument and result information for the function type
237 /// through which to perform a send to the given Objective-C method,
238 /// using the given receiver type. The receiver type is not always
239 /// the 'self' type of the method or even an Objective-C pointer type.
240 /// This is *not* the right method for actually performing such a
241 /// message send, due to the possibility of optional arguments.
242 const CGFunctionInfo &
243 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
244 QualType receiverType) {
245 SmallVector<CanQualType, 16> argTys;
246 argTys.push_back(Context.getCanonicalParamType(receiverType));
247 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
248 // FIXME: Kill copy?
249 for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
250 e = MD->param_end(); i != e; ++i) {
251 argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
252 }
253
254 FunctionType::ExtInfo einfo;
255 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
256
257 if (getContext().getLangOpts().ObjCAutoRefCount &&
258 MD->hasAttr<NSReturnsRetainedAttr>())
259 einfo = einfo.withProducesResult(true);
260
261 RequiredArgs required =
262 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
263
264 return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
265 einfo, required);
266 }
267
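/// Arrange the argument and result information for the declaration or
/// definition of the given global, dispatching to the constructor and
/// destructor variants where necessary.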
268 const CGFunctionInfo &
269 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
270 // FIXME: Do we need to handle ObjCMethodDecl?
271 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
272
273 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
274 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
275
276 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
277 return arrangeCXXDestructor(DD, GD.getDtorType());
278
279 return arrangeFunctionDeclaration(FD);
280 }
281
282 /// Figure out the rules for calling a function with the given formal
283 /// type using the given arguments. The arguments are necessary
284 /// because the function might be unprototyped, in which case it's
285 /// target-dependent in crazy ways.
286 const CGFunctionInfo &
287 CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
288 const FunctionType *fnType) {
289 RequiredArgs required = RequiredArgs::All;
290 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
291 if (proto->isVariadic())
292 required = RequiredArgs(proto->getNumArgs());
293 } else if (CGM.getTargetCodeGenInfo()
294 .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
295 required = RequiredArgs(0);
296 }
297
298 return arrangeFunctionCall(fnType->getResultType(), args,
299 fnType->getExtInfo(), required);
300 }
301
302 const CGFunctionInfo &
303 CodeGenTypes::arrangeFunctionCall(QualType resultType,
304 const CallArgList &args,
305 const FunctionType::ExtInfo &info,
306 RequiredArgs required) {
307 // FIXME: Kill copy.
308 SmallVector<CanQualType, 16> argTypes;
309 for (CallArgList::const_iterator i = args.begin(), e = args.end();
310 i != e; ++i)
311 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
312 return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
313 required);
314 }
315
316 const CGFunctionInfo &
317 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
318 const FunctionArgList &args,
319 const FunctionType::ExtInfo &info,
320 bool isVariadic) {
321 // FIXME: Kill copy.
322 SmallVector<CanQualType, 16> argTypes;
323 for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
324 i != e; ++i)
325 argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
326
327 RequiredArgs required =
328 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
329 return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
330 required);
331 }
332
333 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
334 return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
335 FunctionType::ExtInfo(), RequiredArgs::All);
336 }
337
338 /// Arrange the argument and result information for an abstract value
339 /// of a given function type. This is the method which all of the
340 /// above functions ultimately defer to.
341 const CGFunctionInfo &
342 CodeGenTypes::arrangeFunctionType(CanQualType resultType,
343 ArrayRef<CanQualType> argTypes,
344 const FunctionType::ExtInfo &info,
345 RequiredArgs required) {
346 #ifndef NDEBUG
347 for (ArrayRef<CanQualType>::const_iterator
348 I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
349 assert(I->isCanonicalAsParam());
350 #endif
351
352 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
353
354 // Lookup or create unique function info.
355 llvm::FoldingSetNodeID ID;
356 CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
357
358 void *insertPos = 0;
359 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
360 if (FI)
361 return *FI;
362
363 // Construct the function info. We co-allocate the ArgInfos.
364 FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
365 FunctionInfos.InsertNode(FI, insertPos);
366
367 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
368 assert(inserted && "Recursively being processed?");
369
370 // Compute ABI information.
371 getABIInfo().computeInfo(*FI);
372
373 // Loop over all of the computed argument and return value info. If any of
374 // them are direct or extend without a specified coerce type, specify the
375 // default now.
376 ABIArgInfo &retInfo = FI->getReturnInfo();
377 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
378 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
379
380 for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
381 I != E; ++I)
382 if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
383 I->info.setCoerceToType(ConvertType(I->type));
384
385 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
386 assert(erased && "Not in set?");
387
388 return *FI;
389 }
390
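/// Create a CGFunctionInfo with its ArgInfo array co-allocated in a single
/// trailing buffer: slot 0 holds the return type and slots 1..N hold the
/// argument types. The ABI-specific details are filled in later by
/// computeInfo() in arrangeFunctionType.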
391 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
392 const FunctionType::ExtInfo &info,
393 CanQualType resultType,
394 ArrayRef<CanQualType> argTypes,
395 RequiredArgs required) {
396 void *buffer = operator new(sizeof(CGFunctionInfo) +
397 sizeof(ArgInfo) * (argTypes.size() + 1));
398 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
399 FI->CallingConvention = llvmCC;
400 FI->EffectiveCallingConvention = llvmCC;
401 FI->ASTCallingConvention = info.getCC();
402 FI->NoReturn = info.getNoReturn();
403 FI->ReturnsRetained = info.getProducesResult();
404 FI->Required = required;
405 FI->HasRegParm = info.getHasRegParm();
406 FI->RegParm = info.getRegParm();
407 FI->NumArgs = argTypes.size();
408 FI->getArgsBuffer()[0].type = resultType;
409 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
410 FI->getArgsBuffer()[i + 1].type = argTypes[i];
411 return FI;
412 }
413
414 /***/
415
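/// Recursively flatten a type used with ABIArgInfo::Expand into the list of
/// scalar LLVM types it expands to: constant arrays expand element by
/// element, records field by field (unions via their largest field), and
/// complex types into two scalars.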
416 void CodeGenTypes::GetExpandedTypes(QualType type,
417 SmallVectorImpl<llvm::Type*> &expandedTypes) {
418 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
419 uint64_t NumElts = AT->getSize().getZExtValue();
420 for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
421 GetExpandedTypes(AT->getElementType(), expandedTypes);
422 } else if (const RecordType *RT = type->getAs<RecordType>()) {
423 const RecordDecl *RD = RT->getDecl();
424 assert(!RD->hasFlexibleArrayMember() &&
425 "Cannot expand structure with flexible array.");
426 if (RD->isUnion()) {
427 // Unions can be here only in degenerate cases - all the fields are the same
428 // after flattening. Thus we have to use the "largest" field.
429 const FieldDecl *LargestFD = 0;
430 CharUnits UnionSize = CharUnits::Zero();
431
432 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
433 i != e; ++i) {
434 const FieldDecl *FD = *i;
435 assert(!FD->isBitField() &&
436 "Cannot expand structure with bit-field members.");
437 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
438 if (UnionSize < FieldSize) {
439 UnionSize = FieldSize;
440 LargestFD = FD;
441 }
442 }
443 if (LargestFD)
444 GetExpandedTypes(LargestFD->getType(), expandedTypes);
445 } else {
446 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
447 i != e; ++i) {
448 const FieldDecl *FD = *i;
449 assert(!FD->isBitField() &&
450 "Cannot expand structure with bit-field members.");
451 GetExpandedTypes(FD->getType(), expandedTypes);
452 }
453 }
454 } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
455 llvm::Type *EltTy = ConvertType(CT->getElementType());
456 expandedTypes.push_back(EltTy);
457 expandedTypes.push_back(EltTy);
458 } else
459 expandedTypes.push_back(ConvertType(type));
460 }
461
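/// Reassemble a value that was passed in expanded form: walk the type in the
/// same order as GetExpandedTypes, store the incoming LLVM arguments into the
/// corresponding pieces of the given lvalue, and return the iterator past the
/// last argument consumed.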
462 llvm::Function::arg_iterator
463 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
464 llvm::Function::arg_iterator AI) {
465 assert(LV.isSimple() &&
466 "Unexpected non-simple lvalue during struct expansion.");
467
468 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
469 unsigned NumElts = AT->getSize().getZExtValue();
470 QualType EltTy = AT->getElementType();
471 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
472 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
473 LValue LV = MakeAddrLValue(EltAddr, EltTy);
474 AI = ExpandTypeFromArgs(EltTy, LV, AI);
475 }
476 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
477 RecordDecl *RD = RT->getDecl();
478 if (RD->isUnion()) {
479 // Unions can be here only in degenerate cases - all the fields are the same
480 // after flattening. Thus we have to use the "largest" field.
481 const FieldDecl *LargestFD = 0;
482 CharUnits UnionSize = CharUnits::Zero();
483
484 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
485 i != e; ++i) {
486 const FieldDecl *FD = *i;
487 assert(!FD->isBitField() &&
488 "Cannot expand structure with bit-field members.");
489 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
490 if (UnionSize < FieldSize) {
491 UnionSize = FieldSize;
492 LargestFD = FD;
493 }
494 }
495 if (LargestFD) {
496 // FIXME: What are the right qualifiers here?
497 LValue SubLV = EmitLValueForField(LV, LargestFD);
498 AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
499 }
500 } else {
501 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
502 i != e; ++i) {
503 FieldDecl *FD = *i;
504 QualType FT = FD->getType();
505
506 // FIXME: What are the right qualifiers here?
507 LValue SubLV = EmitLValueForField(LV, FD);
508 AI = ExpandTypeFromArgs(FT, SubLV, AI);
509 }
510 }
511 } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
512 QualType EltTy = CT->getElementType();
513 llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
514 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
515 llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
516 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
517 } else {
518 EmitStoreThroughLValue(RValue::get(AI), LV);
519 ++AI;
520 }
521
522 return AI;
523 }
524
525 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
526 /// accessing some number of bytes out of it, try to gep into the struct to get
527 /// at its inner goodness. Dive as deep as possible without entering an element
528 /// with an in-memory size smaller than DstSize.
529 static llvm::Value *
530 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
531 llvm::StructType *SrcSTy,
532 uint64_t DstSize, CodeGenFunction &CGF) {
533 // We can't dive into a zero-element struct.
534 if (SrcSTy->getNumElements() == 0) return SrcPtr;
535
536 llvm::Type *FirstElt = SrcSTy->getElementType(0);
537
538 // If the first elt is at least as large as what we're looking for, or if the
539 // first element is the same size as the whole struct, we can enter it.
540 uint64_t FirstEltSize =
541 CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
542 if (FirstEltSize < DstSize &&
543 FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
544 return SrcPtr;
545
546 // GEP into the first element.
547 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
548
549 // If the first element is a struct, recurse.
550 llvm::Type *SrcTy =
551 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
552 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
553 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
554
555 return SrcPtr;
556 }
557
558 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
559 /// are either integers or pointers. This does a truncation of the value if it
560 /// is too large or a zero extension if it is too small.
561 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
562 llvm::Type *Ty,
563 CodeGenFunction &CGF) {
564 if (Val->getType() == Ty)
565 return Val;
566
567 if (isa<llvm::PointerType>(Val->getType())) {
568 // If this is Pointer->Pointer avoid conversion to and from int.
569 if (isa<llvm::PointerType>(Ty))
570 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
571
572 // Convert the pointer to an integer so we can play with its width.
573 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
574 }
575
576 llvm::Type *DestIntTy = Ty;
577 if (isa<llvm::PointerType>(DestIntTy))
578 DestIntTy = CGF.IntPtrTy;
579
580 if (Val->getType() != DestIntTy)
581 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
582
583 if (isa<llvm::PointerType>(Ty))
584 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
585 return Val;
586 }
587
588
589
590 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
591 /// a pointer to an object of type \arg Ty.
592 ///
593 /// This safely handles the case when the src type is smaller than the
594 /// destination type; in this situation the values of bits which are not
595 /// present in the src are undefined.
596 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
597 llvm::Type *Ty,
598 CodeGenFunction &CGF) {
599 llvm::Type *SrcTy =
600 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
601
602 // If SrcTy and Ty are the same, just do a load.
603 if (SrcTy == Ty)
604 return CGF.Builder.CreateLoad(SrcPtr);
605
606 uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
607
608 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
609 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
610 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
611 }
612
613 uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
614
615 // If the source and destination are integer or pointer types, just do an
616 // extension or truncation to the desired type.
617 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
618 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
619 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
620 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
621 }
622
623 // If load is legal, just bitcast the src pointer.
624 if (SrcSize >= DstSize) {
625 // Generally SrcSize is never greater than DstSize, since this means we are
626 // losing bits. However, this can happen in cases where the structure has
627 // additional padding, for example due to a user specified alignment.
628 //
629 // FIXME: Assert that we aren't truncating non-padding bits when we have access
630 // to that information.
631 llvm::Value *Casted =
632 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
633 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
634 // FIXME: Use better alignment / avoid requiring aligned load.
635 Load->setAlignment(1);
636 return Load;
637 }
638
639 // Otherwise do coercion through memory. This is stupid, but
640 // simple.
641 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
642 llvm::Value *Casted =
643 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
644 llvm::StoreInst *Store =
645 CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
646 // FIXME: Use better alignment / avoid requiring aligned store.
647 Store->setAlignment(1);
648 return CGF.Builder.CreateLoad(Tmp);
649 }
650
651 // Function to store a first-class aggregate into memory. We prefer to
652 // store the elements rather than the aggregate to be more friendly to
653 // fast-isel.
654 // FIXME: Do we need to recurse here?
655 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
656 llvm::Value *DestPtr, bool DestIsVolatile,
657 bool LowAlignment) {
658 // Prefer scalar stores to first-class aggregate stores.
659 if (llvm::StructType *STy =
660 dyn_cast<llvm::StructType>(Val->getType())) {
661 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
662 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
663 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
664 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
665 DestIsVolatile);
666 if (LowAlignment)
667 SI->setAlignment(1);
668 }
669 } else {
670 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
671 if (LowAlignment)
672 SI->setAlignment(1);
673 }
674 }
675
676 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
677 /// where the source and destination may have different types.
678 ///
679 /// This safely handles the case when the src type is larger than the
680 /// destination type; the upper bits of the src will be lost.
681 static void CreateCoercedStore(llvm::Value *Src,
682 llvm::Value *DstPtr,
683 bool DstIsVolatile,
684 CodeGenFunction &CGF) {
685 llvm::Type *SrcTy = Src->getType();
686 llvm::Type *DstTy =
687 cast<llvm::PointerType>(DstPtr->getType())->getElementType();
688 if (SrcTy == DstTy) {
689 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
690 return;
691 }
692
693 uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
694
695 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
696 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
697 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
698 }
699
700 // If the source and destination are integer or pointer types, just do an
701 // extension or truncation to the desired type.
702 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
703 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
704 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
705 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
706 return;
707 }
708
709 uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
710
711 // If store is legal, just bitcast the src pointer.
712 if (SrcSize <= DstSize) {
713 llvm::Value *Casted =
714 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
715 // FIXME: Use better alignment / avoid requiring aligned store.
716 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
717 } else {
718 // Otherwise do coercion through memory. This is stupid, but
719 // simple.
720
721 // Generally SrcSize is never greater than DstSize, since this means we are
722 // losing bits. However, this can happen in cases where the structure has
723 // additional padding, for example due to a user specified alignment.
724 //
725 // FIXME: Assert that we aren't truncating non-padding bits when we have access
726 // to that information.
727 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
728 CGF.Builder.CreateStore(Src, Tmp);
729 llvm::Value *Casted =
730 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
731 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
732 // FIXME: Use better alignment / avoid requiring aligned load.
733 Load->setAlignment(1);
734 CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
735 }
736 }
737
738 /***/
739
740 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
741 return FI.getReturnInfo().isIndirect();
742 }
743
744 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
745 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
746 switch (BT->getKind()) {
747 default:
748 return false;
749 case BuiltinType::Float:
750 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
751 case BuiltinType::Double:
752 return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
753 case BuiltinType::LongDouble:
754 return getContext().getTargetInfo().useObjCFPRetForRealType(
755 TargetInfo::LongDouble);
756 }
757 }
758
759 return false;
760 }
761
762 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
763 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
764 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
765 if (BT->getKind() == BuiltinType::LongDouble)
766 return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
767 }
768 }
769
770 return false;
771 }
772
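/// Return the LLVM function type for the given global declaration, based on
/// the CGFunctionInfo arranged for it.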
773 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
774 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
775 return GetFunctionType(FI);
776 }
777
778 llvm::FunctionType *
779 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
780
781 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
782 assert(Inserted && "Recursively being processed?");
783
784 SmallVector<llvm::Type*, 8> argTypes;
785 llvm::Type *resultType = 0;
786
787 const ABIArgInfo &retAI = FI.getReturnInfo();
788 switch (retAI.getKind()) {
789 case ABIArgInfo::Expand:
790 llvm_unreachable("Invalid ABI kind for return argument");
791
792 case ABIArgInfo::Extend:
793 case ABIArgInfo::Direct:
794 resultType = retAI.getCoerceToType();
795 break;
796
797 case ABIArgInfo::Indirect: {
798 assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
799 resultType = llvm::Type::getVoidTy(getLLVMContext());
800
801 QualType ret = FI.getReturnType();
802 llvm::Type *ty = ConvertType(ret);
803 unsigned addressSpace = Context.getTargetAddressSpace(ret);
804 argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
805 break;
806 }
807
808 case ABIArgInfo::Ignore:
809 resultType = llvm::Type::getVoidTy(getLLVMContext());
810 break;
811 }
812
813 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
814 ie = FI.arg_end(); it != ie; ++it) {
815 const ABIArgInfo &argAI = it->info;
816
817 switch (argAI.getKind()) {
818 case ABIArgInfo::Ignore:
819 break;
820
821 case ABIArgInfo::Indirect: {
822 // indirect arguments are always on the stack, which is addr space #0.
823 llvm::Type *LTy = ConvertTypeForMem(it->type);
824 argTypes.push_back(LTy->getPointerTo());
825 break;
826 }
827
828 case ABIArgInfo::Extend:
829 case ABIArgInfo::Direct: {
830 // Insert a padding type to ensure proper alignment.
831 if (llvm::Type *PaddingType = argAI.getPaddingType())
832 argTypes.push_back(PaddingType);
833 // If the coerce-to type is a first class aggregate, flatten it. Either
834 // way is semantically identical, but fast-isel and the optimizer
835 // generally likes scalar values better than FCAs.
836 llvm::Type *argType = argAI.getCoerceToType();
837 if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
838 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
839 argTypes.push_back(st->getElementType(i));
840 } else {
841 argTypes.push_back(argType);
842 }
843 break;
844 }
845
846 case ABIArgInfo::Expand:
847 GetExpandedTypes(it->type, argTypes);
848 break;
849 }
850 }
851
852 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
853 assert(Erased && "Not in set?");
854
855 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
856 }
857
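/// Return the LLVM type to use in a vtable slot for the given method. If the
/// method's function type cannot be converted yet (isFuncTypeConvertible
/// fails, e.g. because a type it mentions is still incomplete), an empty
/// struct type is returned as a placeholder.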
858 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
859 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
860 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
861
862 if (!isFuncTypeConvertible(FPT))
863 return llvm::StructType::get(getLLVMContext());
864
865 const CGFunctionInfo *Info;
866 if (isa<CXXDestructorDecl>(MD))
867 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
868 else
869 Info = &arrangeCXXMethodDeclaration(MD);
870 return GetFunctionType(*Info);
871 }
872
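/// Build the LLVM attribute list for a function definition or call site:
/// function-level attributes (noreturn, nounwind, readnone/readonly, ...),
/// return-value attributes (sext/zext, sret, noalias), and per-parameter
/// attributes (byval, inreg, alignment), keeping the attribute indices in
/// step with the ABI-expanded parameter list.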
873 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
874 const Decl *TargetDecl,
875 AttributeListType &PAL,
876 unsigned &CallingConv) {
877 llvm::Attributes FuncAttrs;
878 llvm::Attributes RetAttrs;
879
880 CallingConv = FI.getEffectiveCallingConvention();
881
882 if (FI.isNoReturn())
883 FuncAttrs |= llvm::Attribute::NoReturn;
884
885 // FIXME: handle sseregparm someday...
886 if (TargetDecl) {
887 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
888 FuncAttrs |= llvm::Attribute::ReturnsTwice;
889 if (TargetDecl->hasAttr<NoThrowAttr>())
890 FuncAttrs |= llvm::Attribute::NoUnwind;
891 else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
892 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
893 if (FPT && FPT->isNothrow(getContext()))
894 FuncAttrs |= llvm::Attribute::NoUnwind;
895 }
896
897 if (TargetDecl->hasAttr<NoReturnAttr>())
898 FuncAttrs |= llvm::Attribute::NoReturn;
899
900 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
901 FuncAttrs |= llvm::Attribute::ReturnsTwice;
902
903 // 'const' and 'pure' attribute functions are also nounwind.
904 if (TargetDecl->hasAttr<ConstAttr>()) {
905 FuncAttrs |= llvm::Attribute::ReadNone;
906 FuncAttrs |= llvm::Attribute::NoUnwind;
907 } else if (TargetDecl->hasAttr<PureAttr>()) {
908 FuncAttrs |= llvm::Attribute::ReadOnly;
909 FuncAttrs |= llvm::Attribute::NoUnwind;
910 }
911 if (TargetDecl->hasAttr<MallocAttr>())
912 RetAttrs |= llvm::Attribute::NoAlias;
913 }
914
915 if (CodeGenOpts.OptimizeSize)
916 FuncAttrs |= llvm::Attribute::OptimizeForSize;
917 if (CodeGenOpts.DisableRedZone)
918 FuncAttrs |= llvm::Attribute::NoRedZone;
919 if (CodeGenOpts.NoImplicitFloat)
920 FuncAttrs |= llvm::Attribute::NoImplicitFloat;
921
922 QualType RetTy = FI.getReturnType();
923 unsigned Index = 1;
924 const ABIArgInfo &RetAI = FI.getReturnInfo();
925 switch (RetAI.getKind()) {
926 case ABIArgInfo::Extend:
927 if (RetTy->hasSignedIntegerRepresentation())
928 RetAttrs |= llvm::Attribute::SExt;
929 else if (RetTy->hasUnsignedIntegerRepresentation())
930 RetAttrs |= llvm::Attribute::ZExt;
931 break;
932 case ABIArgInfo::Direct:
933 case ABIArgInfo::Ignore:
934 break;
935
936 case ABIArgInfo::Indirect:
937 PAL.push_back(llvm::AttributeWithIndex::get(Index,
938 llvm::Attribute::StructRet));
939 ++Index;
940 // sret disables readnone and readonly
941 FuncAttrs &= ~(llvm::Attribute::ReadOnly |
942 llvm::Attribute::ReadNone);
943 break;
944
945 case ABIArgInfo::Expand:
946 llvm_unreachable("Invalid ABI kind for return argument");
947 }
948
949 if (RetAttrs)
950 PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
951
952 // FIXME: RegParm should be reduced in case of global register variable.
953 signed RegParm;
954 if (FI.getHasRegParm())
955 RegParm = FI.getRegParm();
956 else
957 RegParm = CodeGenOpts.NumRegisterParameters;
958
959 unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
960 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
961 ie = FI.arg_end(); it != ie; ++it) {
962 QualType ParamType = it->type;
963 const ABIArgInfo &AI = it->info;
964 llvm::Attributes Attrs;
965
966 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
967 // have the corresponding parameter variable. It doesn't make
968 // sense to do it here because parameters are so messed up.
969 switch (AI.getKind()) {
970 case ABIArgInfo::Extend:
971 if (ParamType->isSignedIntegerOrEnumerationType())
972 Attrs |= llvm::Attribute::SExt;
973 else if (ParamType->isUnsignedIntegerOrEnumerationType())
974 Attrs |= llvm::Attribute::ZExt;
975 // FALL THROUGH
976 case ABIArgInfo::Direct:
977 if (RegParm > 0 &&
978 (ParamType->isIntegerType() || ParamType->isPointerType() ||
979 ParamType->isReferenceType())) {
980 RegParm -=
981 (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
982 if (RegParm >= 0)
983 Attrs |= llvm::Attribute::InReg;
984 }
985 // FIXME: handle sseregparm someday...
986
987 // Increment Index if there is padding.
988 Index += (AI.getPaddingType() != 0);
989
990 if (llvm::StructType *STy =
991 dyn_cast<llvm::StructType>(AI.getCoerceToType()))
992 Index += STy->getNumElements()-1; // 1 will be added below.
993 break;
994
995 case ABIArgInfo::Indirect:
996 if (AI.getIndirectByVal())
997 Attrs |= llvm::Attribute::ByVal;
998
999 Attrs |=
1000 llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
1001 // byval disables readnone and readonly.
1002 FuncAttrs &= ~(llvm::Attribute::ReadOnly |
1003 llvm::Attribute::ReadNone);
1004 break;
1005
1006 case ABIArgInfo::Ignore:
1007 // Skip increment, no matching LLVM parameter.
1008 continue;
1009
1010 case ABIArgInfo::Expand: {
1011 SmallVector<llvm::Type*, 8> types;
1012 // FIXME: This is rather inefficient. Do we ever actually need to do
1013 // anything here? The result should be just reconstructed on the other
1014 // side, so extension should be a non-issue.
1015 getTypes().GetExpandedTypes(ParamType, types);
1016 Index += types.size();
1017 continue;
1018 }
1019 }
1020
1021 if (Attrs)
1022 PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
1023 ++Index;
1024 }
1025 if (FuncAttrs)
1026 PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
1027 }
1028
1029 /// An argument came in as a promoted argument; demote it back to its
1030 /// declared type.
1031 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1032 const VarDecl *var,
1033 llvm::Value *value) {
1034 llvm::Type *varType = CGF.ConvertType(var->getType());
1035
1036 // This can happen with promotions that actually don't change the
1037 // underlying type, like the enum promotions.
1038 if (value->getType() == varType) return value;
1039
1040 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1041 && "unexpected promotion type");
1042
1043 if (isa<llvm::IntegerType>(varType))
1044 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1045
1046 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1047 }
1048
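/// Emit the prologue that maps the LLVM arguments of Fn back onto the
/// semantic parameters in Args, following the ABIArgInfo kind chosen for
/// each parameter (extend/direct, indirect, expand, or ignore).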
1049 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1050 llvm::Function *Fn,
1051 const FunctionArgList &Args) {
1052 // If this is an implicit-return-zero function, go ahead and
1053 // initialize the return value. TODO: it might be nice to have
1054 // a more general mechanism for this that didn't require synthesized
1055 // return statements.
1056 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
1057 if (FD->hasImplicitReturnZero()) {
1058 QualType RetTy = FD->getResultType().getUnqualifiedType();
1059 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1060 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1061 Builder.CreateStore(Zero, ReturnValue);
1062 }
1063 }
1064
1065 // FIXME: We no longer need the types from FunctionArgList; lift up and
1066 // simplify.
1067
1068 // Emit allocs for param decls. Give the LLVM Argument nodes names.
1069 llvm::Function::arg_iterator AI = Fn->arg_begin();
1070
1071 // Name the struct return argument.
1072 if (CGM.ReturnTypeUsesSRet(FI)) {
1073 AI->setName("agg.result");
1074 AI->addAttr(llvm::Attribute::NoAlias);
1075 ++AI;
1076 }
1077
1078 assert(FI.arg_size() == Args.size() &&
1079 "Mismatch between function signature & arguments.");
1080 unsigned ArgNo = 1;
1081 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1082 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1083 i != e; ++i, ++info_it, ++ArgNo) {
1084 const VarDecl *Arg = *i;
1085 QualType Ty = info_it->type;
1086 const ABIArgInfo &ArgI = info_it->info;
1087
1088 bool isPromoted =
1089 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1090
1091 switch (ArgI.getKind()) {
1092 case ABIArgInfo::Indirect: {
1093 llvm::Value *V = AI;
1094
1095 if (hasAggregateLLVMType(Ty)) {
1096 // Aggregates and complex variables are accessed by reference. All we
1097 // need to do is realign the value, if requested
1098 if (ArgI.getIndirectRealign()) {
1099 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1100
1101 // Copy from the incoming argument pointer to the temporary with the
1102 // appropriate alignment.
1103 //
1104 // FIXME: We should have a common utility for generating an aggregate
1105 // copy.
1106 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1107 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1108 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1109 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1110 Builder.CreateMemCpy(Dst,
1111 Src,
1112 llvm::ConstantInt::get(IntPtrTy,
1113 Size.getQuantity()),
1114 ArgI.getIndirectAlign(),
1115 false);
1116 V = AlignedTemp;
1117 }
1118 } else {
1119 // Load scalar value from indirect argument.
1120 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1121 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
1122
1123 if (isPromoted)
1124 V = emitArgumentDemotion(*this, Arg, V);
1125 }
1126 EmitParmDecl(*Arg, V, ArgNo);
1127 break;
1128 }
1129
1130 case ABIArgInfo::Extend:
1131 case ABIArgInfo::Direct: {
1132 // Skip the dummy padding argument.
1133 if (ArgI.getPaddingType())
1134 ++AI;
1135
1136 // If we have the trivial case, handle it with no muss and fuss.
1137 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1138 ArgI.getCoerceToType() == ConvertType(Ty) &&
1139 ArgI.getDirectOffset() == 0) {
1140 assert(AI != Fn->arg_end() && "Argument mismatch!");
1141 llvm::Value *V = AI;
1142
1143 if (Arg->getType().isRestrictQualified())
1144 AI->addAttr(llvm::Attribute::NoAlias);
1145
1146 // Ensure the argument is the correct type.
1147 if (V->getType() != ArgI.getCoerceToType())
1148 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1149
1150 if (isPromoted)
1151 V = emitArgumentDemotion(*this, Arg, V);
1152
1153 EmitParmDecl(*Arg, V, ArgNo);
1154 break;
1155 }
1156
1157 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1158
1159 // The alignment we need to use is the max of the requested alignment for
1160 // the argument and the alignment required by our access code below.
1161 unsigned AlignmentToUse =
1162 CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
1163 AlignmentToUse = std::max(AlignmentToUse,
1164 (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1165
1166 Alloca->setAlignment(AlignmentToUse);
1167 llvm::Value *V = Alloca;
1168 llvm::Value *Ptr = V; // Pointer to store into.
1169
1170 // If the value is offset in memory, apply the offset now.
1171 if (unsigned Offs = ArgI.getDirectOffset()) {
1172 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1173 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1174 Ptr = Builder.CreateBitCast(Ptr,
1175 llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1176 }
1177
1178 // If the coerce-to type is a first class aggregate, we flatten it and
1179 // pass the elements. Either way is semantically identical, but fast-isel
1180 // and the optimizer generally likes scalar values better than FCAs.
1181 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1182 if (STy && STy->getNumElements() > 1) {
1183 uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
1184 llvm::Type *DstTy =
1185 cast<llvm::PointerType>(Ptr->getType())->getElementType();
1186 uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
1187
1188 if (SrcSize <= DstSize) {
1189 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1190
1191 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1192 assert(AI != Fn->arg_end() && "Argument mismatch!");
1193 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1194 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1195 Builder.CreateStore(AI++, EltPtr);
1196 }
1197 } else {
1198 llvm::AllocaInst *TempAlloca =
1199 CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1200 TempAlloca->setAlignment(AlignmentToUse);
1201 llvm::Value *TempV = TempAlloca;
1202
1203 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1204 assert(AI != Fn->arg_end() && "Argument mismatch!");
1205 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1206 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1207 Builder.CreateStore(AI++, EltPtr);
1208 }
1209
1210 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1211 }
1212 } else {
1213 // Simple case, just do a coerced store of the argument into the alloca.
1214 assert(AI != Fn->arg_end() && "Argument mismatch!");
1215 AI->setName(Arg->getName() + ".coerce");
1216 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1217 }
1218
1219
1220 // Match to what EmitParmDecl is expecting for this type.
1221 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1222 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
1223 if (isPromoted)
1224 V = emitArgumentDemotion(*this, Arg, V);
1225 }
1226 EmitParmDecl(*Arg, V, ArgNo);
1227 continue; // Skip ++AI increment, already done.
1228 }
1229
1230 case ABIArgInfo::Expand: {
1231 // If this structure was expanded into multiple arguments then
1232 // we need to create a temporary and reconstruct it from the
1233 // arguments.
1234 llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1235 CharUnits Align = getContext().getDeclAlign(Arg);
1236 Alloca->setAlignment(Align.getQuantity());
1237 LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1238 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1239 EmitParmDecl(*Arg, Alloca, ArgNo);
1240
1241 // Name the arguments used in expansion and increment AI.
1242 unsigned Index = 0;
1243 for (; AI != End; ++AI, ++Index)
1244 AI->setName(Arg->getName() + "." + Twine(Index));
1245 continue;
1246 }
1247
1248 case ABIArgInfo::Ignore:
1249 // Initialize the local variable appropriately.
1250 if (hasAggregateLLVMType(Ty))
1251 EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1252 else
1253 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1254 ArgNo);
1255
1256 // Skip increment, no matching LLVM parameter.
1257 continue;
1258 }
1259
1260 ++AI;
1261 }
1262 assert(AI == Fn->arg_end() && "Argument mismatch!");
1263 }
1264
1265 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1266 while (insn->use_empty()) {
1267 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1268 if (!bitcast) return;
1269
1270 // This is "safe" because we would have used a ConstantExpr otherwise.
1271 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1272 bitcast->eraseFromParent();
1273 }
1274 }
1275
1276 /// Try to emit a fused autorelease of a return result.
1277 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1278 llvm::Value *result) {
1279 // We must be immediately followed by the cast.
1280 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1281 if (BB->empty()) return 0;
1282 if (&BB->back() != result) return 0;
1283
1284 llvm::Type *resultType = result->getType();
1285
1286 // result is in a BasicBlock and is therefore an Instruction.
1287 llvm::Instruction *generator = cast<llvm::Instruction>(result);
1288
1289 SmallVector<llvm::Instruction*,4> insnsToKill;
1290
1291 // Look for:
1292 // %generator = bitcast %type1* %generator2 to %type2*
1293 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1294 // We would have emitted this as a constant if the operand weren't
1295 // an Instruction.
1296 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1297
1298 // Require the generator to be immediately followed by the cast.
1299 if (generator->getNextNode() != bitcast)
1300 return 0;
1301
1302 insnsToKill.push_back(bitcast);
1303 }
1304
1305 // Look for:
1306 // %generator = call i8* @objc_retain(i8* %originalResult)
1307 // or
1308 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1309 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1310 if (!call) return 0;
1311
1312 bool doRetainAutorelease;
1313
1314 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1315 doRetainAutorelease = true;
1316 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1317 .objc_retainAutoreleasedReturnValue) {
1318 doRetainAutorelease = false;
1319
1320 // Look for an inline asm immediately preceding the call and kill it, too.
1321 llvm::Instruction *prev = call->getPrevNode();
1322 if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
1323 if (asmCall->getCalledValue()
1324 == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
1325 insnsToKill.push_back(prev);
1326 } else {
1327 return 0;
1328 }
1329
1330 result = call->getArgOperand(0);
1331 insnsToKill.push_back(call);
1332
1333 // Keep killing bitcasts, for sanity. Note that we no longer care
1334 // about precise ordering as long as there's exactly one use.
1335 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1336 if (!bitcast->hasOneUse()) break;
1337 insnsToKill.push_back(bitcast);
1338 result = bitcast->getOperand(0);
1339 }
1340
1341 // Delete all the unnecessary instructions, from latest to earliest.
1342 for (SmallVectorImpl<llvm::Instruction*>::iterator
1343 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1344 (*i)->eraseFromParent();
1345
1346 // Do the fused retain/autorelease if we were asked to.
1347 if (doRetainAutorelease)
1348 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1349
1350 // Cast back to the result type.
1351 return CGF.Builder.CreateBitCast(result, resultType);
1352 }
1353
1354 /// If this is a +1 of the value of an immutable 'self', remove it.
1355 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1356 llvm::Value *result) {
1357 // This is only applicable to a method with an immutable 'self'.
1358 const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
1359 if (!method) return 0;
1360 const VarDecl *self = method->getSelfDecl();
1361 if (!self->getType().isConstQualified()) return 0;
1362
1363 // Look for a retain call.
1364 llvm::CallInst *retainCall =
1365 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1366 if (!retainCall ||
1367 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1368 return 0;
1369
1370 // Look for an ordinary load of 'self'.
1371 llvm::Value *retainedValue = retainCall->getArgOperand(0);
1372 llvm::LoadInst *load =
1373 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1374 if (!load || load->isAtomic() || load->isVolatile() ||
1375 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1376 return 0;
1377
1378 // Okay! Burn it all down. This relies for correctness on the
1379 // assumption that the retain is emitted as part of the return and
1380 // that thereafter everything is used "linearly".
1381 llvm::Type *resultType = result->getType();
1382 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1383 assert(retainCall->use_empty());
1384 retainCall->eraseFromParent();
1385 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1386
1387 return CGF.Builder.CreateBitCast(load, resultType);
1388 }
1389
1390 /// Emit an ARC autorelease of the result of a function.
1391 ///
1392 /// \return the value to actually return from the function
1393 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1394 llvm::Value *result) {
1395 // If we're returning 'self', kill the initial retain. This is a
1396 // heuristic attempt to "encourage correctness" in the really unfortunate
1397 // case where we have a return of self during a dealloc and we desperately
1398 // need to avoid the possible autorelease.
1399 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1400 return self;
1401
1402 // At -O0, try to emit a fused retain/autorelease.
1403 if (CGF.shouldUseFusedARCCalls())
1404 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1405 return fused;
1406
1407 return CGF.EmitARCAutoreleaseReturnValue(result);
1408 }
1409
1410 /// Heuristically search for a dominating store to the return-value slot.
1411 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1412 // If there are multiple uses of the return-value slot, just check
1413 // for something immediately preceding the IP. Sometimes this can
1414 // happen with how we generate implicit-returns; it can also happen
1415 // with noreturn cleanups.
1416 if (!CGF.ReturnValue->hasOneUse()) {
1417 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1418 if (IP->empty()) return 0;
1419 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1420 if (!store) return 0;
1421 if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1422 assert(!store->isAtomic() && !store->isVolatile()); // see below
1423 return store;
1424 }
1425
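// Otherwise the return-value slot has exactly one use; check whether it is a
// store whose block dominates the current insertion point.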
1426 llvm::StoreInst *store =
1427 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1428 if (!store) return 0;
1429
1430 // These aren't actually possible for non-coerced returns, and we
1431 // only care about non-coerced returns on this code path.
1432 assert(!store->isAtomic() && !store->isVolatile());
1433
1434 // Now do a quick-and-dirty dominance check: just walk up the
1435 // single-predecessor chain from the current insertion point.
1436 llvm::BasicBlock *StoreBB = store->getParent();
1437 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1438 while (IP != StoreBB) {
1439 if (!(IP = IP->getSinglePredecessor()))
1440 return 0;
1441 }
1442
1443 // Okay, the store's basic block dominates the insertion point; we
1444 // can do our thing.
1445 return store;
1446 }
1447
1448 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
1449 // Functions with no result always return void.
1450 if (ReturnValue == 0) {
1451 Builder.CreateRetVoid();
1452 return;
1453 }
1454
1455 llvm::DebugLoc RetDbgLoc;
1456 llvm::Value *RV = 0;
1457 QualType RetTy = FI.getReturnType();
1458 const ABIArgInfo &RetAI = FI.getReturnInfo();
1459
1460 switch (RetAI.getKind()) {
1461 case ABIArgInfo::Indirect: {
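// The hidden sret parameter (CurFn->arg_begin()) points at the caller-provided
// return slot; copy the local return value into it.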
1462 unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
1463 if (RetTy->isAnyComplexType()) {
1464 ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1465 StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1466 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1467 // Do nothing; aggregates get evaluated directly into the destination.
1468 } else {
1469 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1470 false, Alignment, RetTy);
1471 }
1472 break;
1473 }
1474
1475 case ABIArgInfo::Extend:
1476 case ABIArgInfo::Direct:
1477 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1478 RetAI.getDirectOffset() == 0) {
1479 // The internal return value temp will always have pointer-to-return-type
1480 // type; just do a load.
1481
1482 // If there is a dominating store to ReturnValue, we can elide
1483 // the load, zap the store, and usually zap the alloca.
1484 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1485 // Get the stored value and nuke the now-dead store.
1486 RetDbgLoc = SI->getDebugLoc();
1487 RV = SI->getValueOperand();
1488 SI->eraseFromParent();
1489
1490 // If that was the only use of the return value, nuke it as well now.
1491 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1492 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1493 ReturnValue = 0;
1494 }
1495
1496 // Otherwise, we have to do a simple load.
1497 } else {
1498 RV = Builder.CreateLoad(ReturnValue);
1499 }
1500 } else {
1501 llvm::Value *V = ReturnValue;
1502 // If the value is offset in memory, apply the offset now.
1503 if (unsigned Offs = RetAI.getDirectOffset()) {
1504 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1505 V = Builder.CreateConstGEP1_32(V, Offs);
1506 V = Builder.CreateBitCast(V,
1507 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1508 }
1509
1510 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1511 }
1512
1513 // In ARC, end functions that return a retainable type with a call
1514 // to objc_autoreleaseReturnValue.
1515 if (AutoreleaseResult) {
1516 assert(getLangOpts().ObjCAutoRefCount &&
1517 !FI.isReturnsRetained() &&
1518 RetTy->isObjCRetainableType());
1519 RV = emitAutoreleaseOfResult(*this, RV);
1520 }
1521
1522 break;
1523
1524 case ABIArgInfo::Ignore:
1525 break;
1526
1527 case ABIArgInfo::Expand:
1528 llvm_unreachable("Invalid ABI kind for return argument");
1529 }
1530
1531 llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1532 if (!RetDbgLoc.isUnknown())
1533 Ret->setDebugLoc(RetDbgLoc);
1534 }
1535
1536 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1537 const VarDecl *param) {
1538 // StartFunction converted the ABI-lowered parameter(s) into a
1539 // local alloca. We need to turn that into an r-value suitable
1540 // for EmitCall.
1541 llvm::Value *local = GetAddrOfLocalVar(param);
1542
1543 QualType type = param->getType();
1544
1545 // For the most part, we just need to load the alloca, except:
1546 // 1) aggregate r-values are actually pointers to temporaries, and
1547 // 2) references to aggregates are pointers directly to the aggregate.
1548 // I don't know why references to non-aggregates are different here.
1549 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1550 if (hasAggregateLLVMType(ref->getPointeeType()))
1551 return args.add(RValue::getAggregate(local), type);
1552
1553 // Locals which are references to scalars are represented
1554 // with allocas holding the pointer.
1555 return args.add(RValue::get(Builder.CreateLoad(local)), type);
1556 }
1557
1558 if (type->isAnyComplexType()) {
1559 ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
1560 return args.add(RValue::getComplex(complex), type);
1561 }
1562
1563 if (hasAggregateLLVMType(type))
1564 return args.add(RValue::getAggregate(local), type);
1565
1566 unsigned alignment = getContext().getDeclAlign(param).getQuantity();
1567 llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
1568 return args.add(RValue::get(value), type);
1569 }
1570
1571 static bool isProvablyNull(llvm::Value *addr) {
1572 return isa<llvm::ConstantPointerNull>(addr);
1573 }
1574
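// Conservative check: only stack allocas are treated as provably non-null,
// which is sufficient for the writeback fast path below.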
1575 static bool isProvablyNonNull(llvm::Value *addr) {
1576 return isa<llvm::AllocaInst>(addr);
1577 }
1578
1579 /// Emit the actual writing-back of a writeback.
1580 static void emitWriteback(CodeGenFunction &CGF,
1581 const CallArgList::Writeback &writeback) {
1582 llvm::Value *srcAddr = writeback.Address;
1583 assert(!isProvablyNull(srcAddr) &&
1584 "shouldn't have writeback for provably null argument");
1585
1586 llvm::BasicBlock *contBB = 0;
1587
1588 // If the argument wasn't provably non-null, we need to null check
1589 // before doing the store.
1590 bool provablyNonNull = isProvablyNonNull(srcAddr);
1591 if (!provablyNonNull) {
1592 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1593 contBB = CGF.createBasicBlock("icr.done");
1594
1595 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1596 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1597 CGF.EmitBlock(writebackBB);
1598 }
1599
1600 // Load the value to writeback.
1601 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1602
1603 // Cast it back, in case we're writing an id to a Foo* or something.
1604 value = CGF.Builder.CreateBitCast(value,
1605 cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1606 "icr.writeback-cast");
1607
1608 // Perform the writeback.
1609 QualType srcAddrType = writeback.AddressType;
1610 CGF.EmitStoreThroughLValue(RValue::get(value),
1611 CGF.MakeAddrLValue(srcAddr, srcAddrType));
1612
1613 // Jump to the continuation block.
1614 if (!provablyNonNull)
1615 CGF.EmitBlock(contBB);
1616 }
1617
1618 static void emitWritebacks(CodeGenFunction &CGF,
1619 const CallArgList &args) {
1620 for (CallArgList::writeback_iterator
1621 i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1622 emitWriteback(CGF, *i);
1623 }
1624
1625 /// Emit an argument that's being passed call-by-writeback. That is,
1626 /// we are passing the address of a temporary copy of the object, and the
/// temporary's value is written back to the original l-value after the call.
1627 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1628 const ObjCIndirectCopyRestoreExpr *CRE) {
1629 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1630
1631 // The dest and src types don't necessarily match in LLVM terms
1632 // because of the crazy ObjC compatibility rules.
1633
1634 llvm::PointerType *destType =
1635 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1636
1637 // If the address is a constant null, just pass the appropriate null.
1638 if (isProvablyNull(srcAddr)) {
1639 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1640 CRE->getType());
1641 return;
1642 }
1643
1644 QualType srcAddrType =
1645 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1646
1647 // Create the temporary.
1648 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1649 "icr.temp");
1650
1651 // Zero-initialize it if we're not doing a copy-initialization.
1652 bool shouldCopy = CRE->shouldCopy();
1653 if (!shouldCopy) {
1654 llvm::Value *null =
1655 llvm::ConstantPointerNull::get(
1656 cast<llvm::PointerType>(destType->getElementType()));
1657 CGF.Builder.CreateStore(null, temp);
1658 }
1659
1660 llvm::BasicBlock *contBB = 0;
1661
1662 // If the address is *not* known to be non-null, we need to switch.
1663 llvm::Value *finalArgument;
1664
1665 bool provablyNonNull = isProvablyNonNull(srcAddr);
1666 if (provablyNonNull) {
1667 finalArgument = temp;
1668 } else {
1669 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1670
1671 finalArgument = CGF.Builder.CreateSelect(isNull,
1672 llvm::ConstantPointerNull::get(destType),
1673 temp, "icr.argument");
1674
1675 // If we need to copy, then the load has to be conditional, which
1676 // means we need control flow.
1677 if (shouldCopy) {
1678 contBB = CGF.createBasicBlock("icr.cont");
1679 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1680 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1681 CGF.EmitBlock(copyBB);
1682 }
1683 }
1684
1685 // Perform a copy if necessary.
1686 if (shouldCopy) {
1687 LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
1688 RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
1689 assert(srcRV.isScalar());
1690
1691 llvm::Value *src = srcRV.getScalarVal();
1692 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1693 "icr.cast");
1694
1695 // Use an ordinary store, not a store-to-lvalue.
1696 CGF.Builder.CreateStore(src, temp);
1697 }
1698
1699 // Finish the control flow if we needed it.
1700 if (shouldCopy && !provablyNonNull)
1701 CGF.EmitBlock(contBB);
1702
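// Record the writeback so emitWritebacks() copies the temporary back through
// the original address after the call; the temporary (or the null selected
// above) is what we actually pass as the argument.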
1703 args.addWriteback(srcAddr, srcAddrType, temp);
1704 args.add(RValue::get(finalArgument), CRE->getType());
1705 }
1706
1707 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
1708 QualType type) {
1709 if (const ObjCIndirectCopyRestoreExpr *CRE
1710 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
1711 assert(getContext().getLangOpts().ObjCAutoRefCount);
1712 assert(getContext().hasSameType(E->getType(), type));
1713 return emitWritebackArg(*this, args, CRE);
1714 }
1715
1716 assert(type->isReferenceType() == E->isGLValue() &&
1717 "reference binding to unmaterialized r-value!");
1718
1719 if (E->isGLValue()) {
1720 assert(E->getObjectKind() == OK_Ordinary);
1721 return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
1722 type);
1723 }
1724
1725 if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
1726 isa<ImplicitCastExpr>(E) &&
1727 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
1728 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
1729 assert(L.isSimple());
1730 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
1731 return;
1732 }
1733
1734 args.add(EmitAnyExprToTemp(E), type);
1735 }
1736
1737 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1738 // optimizer it can aggressively ignore unwind edges.
1739 void
1740 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
1741 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1742 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
1743 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
1744 CGM.getNoObjCARCExceptionsMetadata());
1745 }
1746
1747 /// Emits a call or invoke instruction to the given function, depending
1748 /// on the current state of the EH stack.
1749 llvm::CallSite
1750 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1751 ArrayRef<llvm::Value *> Args,
1752 const Twine &Name) {
1753 llvm::BasicBlock *InvokeDest = getInvokeDest();
1754
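// With no active EH landing pad a plain call suffices; otherwise emit an
// invoke whose unwind edge targets the landing pad.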
1755 llvm::Instruction *Inst;
1756 if (!InvokeDest)
1757 Inst = Builder.CreateCall(Callee, Args, Name);
1758 else {
1759 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
1760 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
1761 EmitBlock(ContBB);
1762 }
1763
1764 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
1765 // optimizer it can aggressively ignore unwind edges.
1766 if (CGM.getLangOpts().ObjCAutoRefCount)
1767 AddObjCARCExceptionMetadata(Inst);
1768
1769 return Inst;
1770 }
1771
1772 llvm::CallSite
1773 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
1774 const Twine &Name) {
1775 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
1776 }
1777
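// In asserts builds, verify that the IR value being passed matches the
// callee's declared parameter type, or falls into the varargs tail.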
1778 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
1779 llvm::FunctionType *FTy) {
1780 if (ArgNo < FTy->getNumParams())
1781 assert(Elt->getType() == FTy->getParamType(ArgNo));
1782 else
1783 assert(FTy->isVarArg());
1784 ++ArgNo;
1785 }
1786
1787 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
1788 SmallVector<llvm::Value*,16> &Args,
1789 llvm::FunctionType *IRFuncTy) {
1790 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1791 unsigned NumElts = AT->getSize().getZExtValue();
1792 QualType EltTy = AT->getElementType();
1793 llvm::Value *Addr = RV.getAggregateAddr();
1794 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
1795 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
1796 LValue LV = MakeAddrLValue(EltAddr, EltTy);
1797 RValue EltRV;
1798 if (EltTy->isAnyComplexType())
1799 // FIXME: Volatile?
1800 EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
1801 else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
1802 EltRV = LV.asAggregateRValue();
1803 else
1804 EltRV = EmitLoadOfLValue(LV);
1805 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
1806 }
1807 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
1808 RecordDecl *RD = RT->getDecl();
1809 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
1810 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
1811
1812 if (RD->isUnion()) {
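// For a union, only the largest member is expanded and passed; it occupies
// at least as many bytes as any other member.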
1813 const FieldDecl *LargestFD = 0;
1814 CharUnits UnionSize = CharUnits::Zero();
1815
1816 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1817 i != e; ++i) {
1818 const FieldDecl *FD = *i;
1819 assert(!FD->isBitField() &&
1820 "Cannot expand structure with bit-field members.");
1821 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
1822 if (UnionSize < FieldSize) {
1823 UnionSize = FieldSize;
1824 LargestFD = FD;
1825 }
1826 }
1827 if (LargestFD) {
1828 RValue FldRV = EmitRValueForField(LV, LargestFD);
1829 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
1830 }
1831 } else {
1832 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1833 i != e; ++i) {
1834 FieldDecl *FD = *i;
1835
1836 RValue FldRV = EmitRValueForField(LV, FD);
1837 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
1838 }
1839 }
1840 } else if (Ty->isAnyComplexType()) {
1841 ComplexPairTy CV = RV.getComplexVal();
1842 Args.push_back(CV.first);
1843 Args.push_back(CV.second);
1844 } else {
1845 assert(RV.isScalar() &&
1846 "Unexpected non-scalar rvalue during struct expansion.");
1847
1848 // Insert a bitcast as needed.
1849 llvm::Value *V = RV.getScalarVal();
1850 if (Args.size() < IRFuncTy->getNumParams() &&
1851 V->getType() != IRFuncTy->getParamType(Args.size()))
1852 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
1853
1854 Args.push_back(V);
1855 }
1856 }
1857
1858
1859 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
1860 llvm::Value *Callee,
1861 ReturnValueSlot ReturnValue,
1862 const CallArgList &CallArgs,
1863 const Decl *TargetDecl,
1864 llvm::Instruction **callOrInvoke) {
1865 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
1866 SmallVector<llvm::Value*, 16> Args;
1867
1868 // Handle struct-return functions by passing a pointer to the
1869 // location that we would like to return into.
1870 QualType RetTy = CallInfo.getReturnType();
1871 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
1872
1873 // IRArgNo - Keep track of the argument number in the callee we're looking at.
1874 unsigned IRArgNo = 0;
1875 llvm::FunctionType *IRFuncTy =
1876 cast<llvm::FunctionType>(
1877 cast<llvm::PointerType>(Callee->getType())->getElementType());
1878
1879 // If the call returns a temporary with struct return, create a temporary
1880 // alloca to hold the result, unless one is given to us.
1881 if (CGM.ReturnTypeUsesSRet(CallInfo)) {
1882 llvm::Value *Value = ReturnValue.getValue();
1883 if (!Value)
1884 Value = CreateMemTemp(RetTy);
1885 Args.push_back(Value);
1886 checkArgMatches(Value, IRArgNo, IRFuncTy);
1887 }
1888
1889 assert(CallInfo.arg_size() == CallArgs.size() &&
1890 "Mismatch between function signature & arguments.");
1891 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
1892 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
1893 I != E; ++I, ++info_it) {
1894 const ABIArgInfo &ArgInfo = info_it->info;
1895 RValue RV = I->RV;
1896
1897 unsigned TypeAlign =
1898 getContext().getTypeAlignInChars(I->Ty).getQuantity();
1899 switch (ArgInfo.getKind()) {
1900 case ABIArgInfo::Indirect: {
1901 if (RV.isScalar() || RV.isComplex()) {
1902 // Make a temporary alloca to pass the argument.
1903 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1904 if (ArgInfo.getIndirectAlign() > AI->getAlignment())
1905 AI->setAlignment(ArgInfo.getIndirectAlign());
1906 Args.push_back(AI);
1907
1908 if (RV.isScalar())
1909 EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
1910 TypeAlign, I->Ty);
1911 else
1912 StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
1913
1914 // Validate argument match.
1915 checkArgMatches(AI, IRArgNo, IRFuncTy);
1916 } else {
1917 // We want to avoid creating an unnecessary temporary+copy here;
1918 // however, we need one in two cases:
1919 // 1. If the argument is not byval, and we are required to copy the
1920 // source. (This case doesn't occur on any common architecture.)
1921 // 2. If the argument is byval, RV is not sufficiently aligned, and
1922 // we cannot force it to be sufficiently aligned.
1923 llvm::Value *Addr = RV.getAggregateAddr();
1924 unsigned Align = ArgInfo.getIndirectAlign();
1925 const llvm::TargetData *TD = &CGM.getTargetData();
1926 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
1927 (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
1928 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
1929 // Create an aligned temporary, and copy to it.
1930 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
1931 if (Align > AI->getAlignment())
1932 AI->setAlignment(Align);
1933 Args.push_back(AI);
1934 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
1935
1936 // Validate argument match.
1937 checkArgMatches(AI, IRArgNo, IRFuncTy);
1938 } else {
1939 // Skip the extra memcpy call.
1940 Args.push_back(Addr);
1941
1942 // Validate argument match.
1943 checkArgMatches(Addr, IRArgNo, IRFuncTy);
1944 }
1945 }
1946 break;
1947 }
1948
1949 case ABIArgInfo::Ignore:
1950 break;
1951
1952 case ABIArgInfo::Extend:
1953 case ABIArgInfo::Direct: {
1954 // Insert a padding argument to ensure proper alignment.
1955 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
1956 Args.push_back(llvm::UndefValue::get(PaddingType));
1957 ++IRArgNo;
1958 }
1959
1960 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
1961 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
1962 ArgInfo.getDirectOffset() == 0) {
1963 llvm::Value *V;
1964 if (RV.isScalar())
1965 V = RV.getScalarVal();
1966 else
1967 V = Builder.CreateLoad(RV.getAggregateAddr());
1968
1969 // If the argument doesn't match, perform a bitcast to coerce it. This
1970 // can happen due to trivial type mismatches.
1971 if (IRArgNo < IRFuncTy->getNumParams() &&
1972 V->getType() != IRFuncTy->getParamType(IRArgNo))
1973 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
1974 Args.push_back(V);
1975
1976 checkArgMatches(V, IRArgNo, IRFuncTy);
1977 break;
1978 }
1979
1980 // FIXME: Avoid the conversion through memory if possible.
1981 llvm::Value *SrcPtr;
1982 if (RV.isScalar()) {
1983 SrcPtr = CreateMemTemp(I->Ty, "coerce");
1984 EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
1985 } else if (RV.isComplex()) {
1986 SrcPtr = CreateMemTemp(I->Ty, "coerce");
1987 StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
1988 } else
1989 SrcPtr = RV.getAggregateAddr();
1990
1991 // If the value is offset in memory, apply the offset now.
1992 if (unsigned Offs = ArgInfo.getDirectOffset()) {
1993 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
1994 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
1995 SrcPtr = Builder.CreateBitCast(SrcPtr,
1996 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
1997
1998 }
1999
2000 // If the coerce-to type is a first class aggregate, we flatten it and
2001 // pass the elements. Either way is semantically identical, but fast-isel
2002 // and the optimizer generally like scalar values better than FCAs.
2003 if (llvm::StructType *STy =
2004 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2005 SrcPtr = Builder.CreateBitCast(SrcPtr,
2006 llvm::PointerType::getUnqual(STy));
2007 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2008 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2009 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2010 // We don't know what we're loading from.
2011 LI->setAlignment(1);
2012 Args.push_back(LI);
2013
2014 // Validate argument match.
2015 checkArgMatches(LI, IRArgNo, IRFuncTy);
2016 }
2017 } else {
2018 // In the simple case, just pass the coerced loaded value.
2019 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2020 *this));
2021
2022 // Validate argument match.
2023 checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2024 }
2025
2026 break;
2027 }
2028
2029 case ABIArgInfo::Expand:
2030 ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
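// Expansion may have pushed several IR arguments; resynchronize IRArgNo
// with what was actually emitted.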
2031 IRArgNo = Args.size();
2032 break;
2033 }
2034 }
2035
2036 // If the callee is a bitcast of a function to a varargs pointer to function
2037 // type, check to see if we can remove the bitcast. This handles some cases
2038 // with unprototyped functions.
2039 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2040 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2041 llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2042 llvm::FunctionType *CurFT =
2043 cast<llvm::FunctionType>(CurPT->getElementType());
2044 llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2045
2046 if (CE->getOpcode() == llvm::Instruction::BitCast &&
2047 ActualFT->getReturnType() == CurFT->getReturnType() &&
2048 ActualFT->getNumParams() == CurFT->getNumParams() &&
2049 ActualFT->getNumParams() == Args.size() &&
2050 (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2051 bool ArgsMatch = true;
2052 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2053 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2054 ArgsMatch = false;
2055 break;
2056 }
2057
2058 // Strip the cast if we can get away with it. This is a nice cleanup,
2059 // but also allows us to inline the function at -O0 if it is marked
2060 // always_inline.
2061 if (ArgsMatch)
2062 Callee = CalleeF;
2063 }
2064 }
2065
2066 unsigned CallingConv;
2067 CodeGen::AttributeListType AttributeList;
2068 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
2069 llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
2070 AttributeList.end());
2071
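// A callee marked nounwind never needs an invoke; otherwise route the
// unwind edge to the current EH landing pad, if any.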
2072 llvm::BasicBlock *InvokeDest = 0;
2073 if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
2074 InvokeDest = getInvokeDest();
2075
2076 llvm::CallSite CS;
2077 if (!InvokeDest) {
2078 CS = Builder.CreateCall(Callee, Args);
2079 } else {
2080 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2081 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2082 EmitBlock(Cont);
2083 }
2084 if (callOrInvoke)
2085 *callOrInvoke = CS.getInstruction();
2086
2087 CS.setAttributes(Attrs);
2088 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2089
2090 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2091 // optimizer it can aggressively ignore unwind edges.
2092 if (CGM.getLangOpts().ObjCAutoRefCount)
2093 AddObjCARCExceptionMetadata(CS.getInstruction());
2094
2095 // If the call doesn't return, finish the basic block and clear the
2096 // insertion point; this allows the rest of IRgen to discard
2097 // unreachable code.
2098 if (CS.doesNotReturn()) {
2099 Builder.CreateUnreachable();
2100 Builder.ClearInsertionPoint();
2101
2102 // FIXME: For now, emit a dummy basic block because expr emitters in
2103 // general are not ready to handle emitting expressions at unreachable
2104 // points.
2105 EnsureInsertPoint();
2106
2107 // Return a reasonable RValue.
2108 return GetUndefRValue(RetTy);
2109 }
2110
2111 llvm::Instruction *CI = CS.getInstruction();
2112 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2113 CI->setName("call");
2114
2115 // Emit any writebacks immediately. Arguably this should happen
2116 // after any return-value munging.
2117 if (CallArgs.hasWritebacks())
2118 emitWritebacks(*this, CallArgs);
2119
2120 switch (RetAI.getKind()) {
2121 case ABIArgInfo::Indirect: {
2122 unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2123 if (RetTy->isAnyComplexType())
2124 return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
2125 if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2126 return RValue::getAggregate(Args[0]);
2127 return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
2128 }
2129
2130 case ABIArgInfo::Ignore:
2131 // Even though the ABI says to ignore this return value, we still need to
2132 // construct an appropriate return value for our caller.
2133 return GetUndefRValue(RetTy);
2134
2135 case ABIArgInfo::Extend:
2136 case ABIArgInfo::Direct: {
2137 llvm::Type *RetIRTy = ConvertType(RetTy);
2138 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2139 if (RetTy->isAnyComplexType()) {
2140 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2141 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2142 return RValue::getComplex(std::make_pair(Real, Imag));
2143 }
2144 if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2145 llvm::Value *DestPtr = ReturnValue.getValue();
2146 bool DestIsVolatile = ReturnValue.isVolatile();
2147
2148 if (!DestPtr) {
2149 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2150 DestIsVolatile = false;
2151 }
2152 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2153 return RValue::getAggregate(DestPtr);
2154 }
2155
2156 // If the argument doesn't match, perform a bitcast to coerce it. This
2157 // can happen due to trivial type mismatches.
2158 llvm::Value *V = CI;
2159 if (V->getType() != RetIRTy)
2160 V = Builder.CreateBitCast(V, RetIRTy);
2161 return RValue::get(V);
2162 }
2163
2164 llvm::Value *DestPtr = ReturnValue.getValue();
2165 bool DestIsVolatile = ReturnValue.isVolatile();
2166
2167 if (!DestPtr) {
2168 DestPtr = CreateMemTemp(RetTy, "coerce");
2169 DestIsVolatile = false;
2170 }
2171
2172 // If the value is offset in memory, apply the offset now.
2173 llvm::Value *StorePtr = DestPtr;
2174 if (unsigned Offs = RetAI.getDirectOffset()) {
2175 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2176 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2177 StorePtr = Builder.CreateBitCast(StorePtr,
2178 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2179 }
2180 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2181
2182 unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
2183 if (RetTy->isAnyComplexType())
2184 return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
2185 if (CodeGenFunction::hasAggregateLLVMType(RetTy))
2186 return RValue::getAggregate(DestPtr);
2187 return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
2188 }
2189
2190 case ABIArgInfo::Expand:
2191 llvm_unreachable("Invalid ABI kind for return argument");
2192 }
2193
2194 llvm_unreachable("Unhandled ABIArgInfo::Kind");
2195 }
2196
2197 /* VarArg handling */
2198
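// va_arg lowering is entirely target-specific; defer to the target's ABIInfo.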
2199 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2200 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2201 }
2202