1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "TargetInfo.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CGValue.h"
19 #include "CodeGenFunction.h"
20 #include "clang/AST/RecordLayout.h"
21 #include "clang/CodeGen/CGFunctionInfo.h"
22 #include "clang/CodeGen/SwiftCallingConv.h"
23 #include "clang/Frontend/CodeGenOptions.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <algorithm> // std::sort
30
31 using namespace clang;
32 using namespace CodeGen;
33
34 // Helper for coercing an aggregate argument or return value into an integer
35 // array of the same size (including padding) and alignment. This alternate
36 // coercion happens only for the RenderScript ABI and can be removed after
37 // runtimes that rely on it are no longer supported.
38 //
39 // RenderScript assumes that the size of the argument / return value in the IR
40 // is the same as the size of the corresponding qualified type. This helper
41 // coerces the aggregate type into an array of the same size (including
42 // padding). This coercion is used in lieu of expansion of struct members or
43 // other canonical coercions that return a coerced-type of larger size.
44 //
45 // Ty - The argument / return value type
46 // Context - The associated ASTContext
47 // LLVMContext - The associated LLVMContext
48 static ABIArgInfo coerceToIntArray(QualType Ty,
49 ASTContext &Context,
50 llvm::LLVMContext &LLVMContext) {
51 // Alignment and Size are measured in bits.
52 const uint64_t Size = Context.getTypeSize(Ty);
53 const uint64_t Alignment = Context.getTypeAlign(Ty);
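  // Descriptive note (added): use an integer element as wide as the type's
  // alignment, and round the element count up so the array covers the full
  // size, padding included.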
54 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
55 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
56 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
57 }
58
59 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
60 llvm::Value *Array,
61 llvm::Value *Value,
62 unsigned FirstIndex,
63 unsigned LastIndex) {
64 // Alternatively, we could emit this as a loop in the source.
65 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
66 llvm::Value *Cell =
67 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
68 Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
69 }
70 }
71
72 static bool isAggregateTypeForABI(QualType T) {
73 return !CodeGenFunction::hasScalarEvaluationKind(T) ||
74 T->isMemberFunctionPointerType();
75 }
76
77 ABIArgInfo
78 ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
79 llvm::Type *Padding) const {
80 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
81 ByRef, Realign, Padding);
82 }
83
84 ABIArgInfo
85 ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
86 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
87 /*ByRef*/ false, Realign);
88 }
89
90 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
91 QualType Ty) const {
92 return Address::invalid();
93 }
94
95 ABIInfo::~ABIInfo() {}
96
97 /// Does the given lowering require more than the given number of
98 /// registers when expanded?
99 ///
100 /// This is intended to be the basis of a reasonable basic implementation
101 /// of should{Pass,Return}IndirectlyForSwift.
102 ///
103 /// For most targets, a limit of four total registers is reasonable; this
104 /// limits the amount of code required in order to move around the value
105 /// in case it wasn't produced immediately prior to the call by the caller
106 /// (or wasn't produced in exactly the right registers) or isn't used
107 /// immediately within the callee. But some targets may need to further
108 /// limit the register count due to an inability to support that many
109 /// return registers.
110 static bool occupiesMoreThan(CodeGenTypes &cgt,
111 ArrayRef<llvm::Type*> scalarTypes,
112 unsigned maxAllRegisters) {
113 unsigned intCount = 0, fpCount = 0;
114 for (llvm::Type *type : scalarTypes) {
115 if (type->isPointerTy()) {
116 intCount++;
117 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
118 auto ptrWidth = cgt.getTarget().getPointerWidth(0);
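      // Descriptive note (added): an integer wider than a pointer is split
      // across multiple pointer-sized registers, so round the count up.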
119 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
120 } else {
121 assert(type->isVectorTy() || type->isFloatingPointTy());
122 fpCount++;
123 }
124 }
125
126 return (intCount + fpCount > maxAllRegisters);
127 }
128
129 bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
130 llvm::Type *eltTy,
131 unsigned numElts) const {
132 // The default implementation of this assumes that the target guarantees
133 // 128-bit SIMD support but nothing more.
134 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
135 }
136
137 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
138 CGCXXABI &CXXABI) {
139 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
140 if (!RD)
141 return CGCXXABI::RAA_Default;
142 return CXXABI.getRecordArgABI(RD);
143 }
144
145 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
146 CGCXXABI &CXXABI) {
147 const RecordType *RT = T->getAs<RecordType>();
148 if (!RT)
149 return CGCXXABI::RAA_Default;
150 return getRecordArgABI(RT, CXXABI);
151 }
152
153 /// Pass transparent unions as if they were the type of the first element. Sema
154 /// should ensure that all elements of the union have the same "machine type".
155 static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
156 if (const RecordType *UT = Ty->getAsUnionType()) {
157 const RecordDecl *UD = UT->getDecl();
158 if (UD->hasAttr<TransparentUnionAttr>()) {
159 assert(!UD->field_empty() && "sema created an empty transparent union");
160 return UD->field_begin()->getType();
161 }
162 }
163 return Ty;
164 }
165
166 CGCXXABI &ABIInfo::getCXXABI() const {
167 return CGT.getCXXABI();
168 }
169
170 ASTContext &ABIInfo::getContext() const {
171 return CGT.getContext();
172 }
173
174 llvm::LLVMContext &ABIInfo::getVMContext() const {
175 return CGT.getLLVMContext();
176 }
177
178 const llvm::DataLayout &ABIInfo::getDataLayout() const {
179 return CGT.getDataLayout();
180 }
181
182 const TargetInfo &ABIInfo::getTarget() const {
183 return CGT.getTarget();
184 }
185
186 bool ABIInfo::isAndroid() const {
187 return getTarget().getTriple().isAndroid() ||
188 getContext().getLangOpts().RenderScript;
189 }
190
191 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
192 return false;
193 }
194
195 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
196 uint64_t Members) const {
197 return false;
198 }
199
200 bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
201 return false;
202 }
203
204 LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
205 raw_ostream &OS = llvm::errs();
206 OS << "(ABIArgInfo Kind=";
207 switch (TheKind) {
208 case Direct:
209 OS << "Direct Type=";
210 if (llvm::Type *Ty = getCoerceToType())
211 Ty->print(OS);
212 else
213 OS << "null";
214 break;
215 case Extend:
216 OS << "Extend";
217 break;
218 case Ignore:
219 OS << "Ignore";
220 break;
221 case InAlloca:
222 OS << "InAlloca Offset=" << getInAllocaFieldIndex();
223 break;
224 case Indirect:
225 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
226 << " ByVal=" << getIndirectByVal()
227 << " Realign=" << getIndirectRealign();
228 break;
229 case Expand:
230 OS << "Expand";
231 break;
232 case CoerceAndExpand:
233 OS << "CoerceAndExpand Type=";
234 getCoerceAndExpandType()->print(OS);
235 break;
236 }
237 OS << ")\n";
238 }
239
240 // Dynamically round a pointer up to a multiple of the given alignment.
241 static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
242 llvm::Value *Ptr,
243 CharUnits Align) {
244 llvm::Value *PtrAsInt = Ptr;
245 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
246 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
247 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
248 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
249 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
250 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
251 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
252 Ptr->getType(),
253 Ptr->getName() + ".aligned");
254 return PtrAsInt;
255 }
256
257 /// Emit va_arg for a platform using the common void* representation,
258 /// where arguments are simply emitted in an array of slots on the stack.
259 ///
260 /// This version implements the core direct-value passing rules.
261 ///
262 /// \param SlotSize - The size and alignment of a stack slot.
263 /// Each argument will be allocated to a multiple of this number of
264 /// slots, and all the slots will be aligned to this value.
265 /// \param AllowHigherAlign - The slot alignment is not a cap;
266 /// an argument type with an alignment greater than the slot size
267 /// will be emitted on a higher-alignment address, potentially
268 /// leaving one or more empty slots behind as padding. If this
269 /// is false, the returned address might be less-aligned than
270 /// DirectAlign.
271 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
272 Address VAListAddr,
273 llvm::Type *DirectTy,
274 CharUnits DirectSize,
275 CharUnits DirectAlign,
276 CharUnits SlotSize,
277 bool AllowHigherAlign) {
278 // Cast the element type to i8* if necessary. Some platforms define
279 // va_list as a struct containing an i8* instead of just an i8*.
280 if (VAListAddr.getElementType() != CGF.Int8PtrTy)
281 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
282
283 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
284
285 // If the CC aligns values higher than the slot size, do so if needed.
286 Address Addr = Address::invalid();
287 if (AllowHigherAlign && DirectAlign > SlotSize) {
288 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
289 DirectAlign);
290 } else {
291 Addr = Address(Ptr, SlotSize);
292 }
293
294 // Advance the pointer past the argument, then store that back.
295 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
296 llvm::Value *NextPtr =
297 CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
298 "argp.next");
299 CGF.Builder.CreateStore(NextPtr, VAListAddr);
300
301 // If the argument is smaller than a slot, and this is a big-endian
302 // target, the argument will be right-adjusted in its slot.
303 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
304 !DirectTy->isStructTy()) {
305 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
306 }
307
308 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
309 return Addr;
310 }
311
312 /// Emit va_arg for a platform using the common void* representation,
313 /// where arguments are simply emitted in an array of slots on the stack.
314 ///
315 /// \param IsIndirect - Values of this type are passed indirectly.
316 /// \param ValueInfo - The size and alignment of this type, generally
317 /// computed with getContext().getTypeInfoInChars(ValueTy).
318 /// \param SlotSizeAndAlign - The size and alignment of a stack slot.
319 /// Each argument will be allocated to a multiple of this number of
320 /// slots, and all the slots will be aligned to this value.
321 /// \param AllowHigherAlign - The slot alignment is not a cap;
322 /// an argument type with an alignment greater than the slot size
323 /// will be emitted on a higher-alignment address, potentially
324 /// leaving one or more empty slots behind as padding.
325 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
326 QualType ValueTy, bool IsIndirect,
327 std::pair<CharUnits, CharUnits> ValueInfo,
328 CharUnits SlotSizeAndAlign,
329 bool AllowHigherAlign) {
330 // The size and alignment of the value that was passed directly.
331 CharUnits DirectSize, DirectAlign;
332 if (IsIndirect) {
333 DirectSize = CGF.getPointerSize();
334 DirectAlign = CGF.getPointerAlign();
335 } else {
336 DirectSize = ValueInfo.first;
337 DirectAlign = ValueInfo.second;
338 }
339
340 // Cast the address we've calculated to the right type.
341 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
342 if (IsIndirect)
343 DirectTy = DirectTy->getPointerTo(0);
344
345 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
346 DirectSize, DirectAlign,
347 SlotSizeAndAlign,
348 AllowHigherAlign);
349
350 if (IsIndirect) {
351 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
352 }
353
354 return Addr;
355
356 }
357
358 static Address emitMergePHI(CodeGenFunction &CGF,
359 Address Addr1, llvm::BasicBlock *Block1,
360 Address Addr2, llvm::BasicBlock *Block2,
361 const llvm::Twine &Name = "") {
362 assert(Addr1.getType() == Addr2.getType());
363 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
364 PHI->addIncoming(Addr1.getPointer(), Block1);
365 PHI->addIncoming(Addr2.getPointer(), Block2);
366 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
367 return Address(PHI, Align);
368 }
369
370 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
371
372 // If someone can figure out a general rule for this, that would be great.
373 // It's probably just doomed to be platform-dependent, though.
374 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
375 // Verified for:
376 // x86-64 FreeBSD, Linux, Darwin
377 // x86-32 FreeBSD, Linux, Darwin
378 // PowerPC Linux, Darwin
379 // ARM Darwin (*not* EABI)
380 // AArch64 Linux
381 return 32;
382 }
383
384 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
385 const FunctionNoProtoType *fnType) const {
386 // The following conventions are known to require this to be false:
387 // x86_stdcall
388 // MIPS
389 // For everything else, we just prefer false unless we opt out.
390 return false;
391 }
392
393 void
394 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
395 llvm::SmallString<24> &Opt) const {
396 // This assumes the user is passing a library name like "rt" instead of a
397 // filename like "librt.a/so", and that they don't care whether it's static or
398 // dynamic.
399 Opt = "-l";
400 Opt += Lib;
401 }
402
403 unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
404 return llvm::CallingConv::C;
405 }
406 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
407
408 /// isEmptyField - Return true iff the field is "empty", that is, it
409 /// is an unnamed bit-field or an (array of) empty record(s).
410 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
411 bool AllowArrays) {
412 if (FD->isUnnamedBitfield())
413 return true;
414
415 QualType FT = FD->getType();
416
417 // Constant arrays of empty records count as empty, strip them off.
418 // Constant arrays of zero length always count as empty.
419 if (AllowArrays)
420 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
421 if (AT->getSize() == 0)
422 return true;
423 FT = AT->getElementType();
424 }
425
426 const RecordType *RT = FT->getAs<RecordType>();
427 if (!RT)
428 return false;
429
430 // C++ record fields are never empty, at least in the Itanium ABI.
431 //
432 // FIXME: We should use a predicate for whether this behavior is true in the
433 // current ABI.
434 if (isa<CXXRecordDecl>(RT->getDecl()))
435 return false;
436
437 return isEmptyRecord(Context, FT, AllowArrays);
438 }
439
440 /// isEmptyRecord - Return true iff a structure contains only empty
441 /// fields. Note that a structure with a flexible array member is not
442 /// considered empty.
443 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
444 const RecordType *RT = T->getAs<RecordType>();
445 if (!RT)
446 return false;
447 const RecordDecl *RD = RT->getDecl();
448 if (RD->hasFlexibleArrayMember())
449 return false;
450
451 // If this is a C++ record, check the bases first.
452 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
453 for (const auto &I : CXXRD->bases())
454 if (!isEmptyRecord(Context, I.getType(), true))
455 return false;
456
457 for (const auto *I : RD->fields())
458 if (!isEmptyField(Context, I, AllowArrays))
459 return false;
460 return true;
461 }
462
463 /// isSingleElementStruct - Determine if a structure is a "single
464 /// element struct", i.e. it has exactly one non-empty field or
465 /// exactly one field which is itself a single element
466 /// struct. Structures with flexible array members are never
467 /// considered single element structs.
468 ///
469 /// \return The field declaration for the single non-empty field, if
470 /// it exists.
471 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
472 const RecordType *RT = T->getAs<RecordType>();
473 if (!RT)
474 return nullptr;
475
476 const RecordDecl *RD = RT->getDecl();
477 if (RD->hasFlexibleArrayMember())
478 return nullptr;
479
480 const Type *Found = nullptr;
481
482 // If this is a C++ record, check the bases first.
483 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
484 for (const auto &I : CXXRD->bases()) {
485 // Ignore empty records.
486 if (isEmptyRecord(Context, I.getType(), true))
487 continue;
488
489 // If we already found an element then this isn't a single-element struct.
490 if (Found)
491 return nullptr;
492
493 // If this is non-empty and not a single element struct, the composite
494 // cannot be a single element struct.
495 Found = isSingleElementStruct(I.getType(), Context);
496 if (!Found)
497 return nullptr;
498 }
499 }
500
501 // Check for single element.
502 for (const auto *FD : RD->fields()) {
503 QualType FT = FD->getType();
504
505 // Ignore empty fields.
506 if (isEmptyField(Context, FD, true))
507 continue;
508
509 // If we already found an element then this isn't a single-element
510 // struct.
511 if (Found)
512 return nullptr;
513
514 // Treat single element arrays as the element.
515 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
516 if (AT->getSize().getZExtValue() != 1)
517 break;
518 FT = AT->getElementType();
519 }
520
521 if (!isAggregateTypeForABI(FT)) {
522 Found = FT.getTypePtr();
523 } else {
524 Found = isSingleElementStruct(FT, Context);
525 if (!Found)
526 return nullptr;
527 }
528 }
529
530 // We don't consider a struct a single-element struct if it has
531 // padding beyond the element type.
532 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
533 return nullptr;
534
535 return Found;
536 }
537
538 namespace {
539 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
540 const ABIArgInfo &AI) {
541 // This default implementation defers to the llvm backend's va_arg
542 // instruction. It can handle only passing arguments directly
543 // (typically only handled in the backend for primitive types), or
544 // aggregates passed indirectly by pointer (NOTE: if the "byval"
545 // flag has ABI impact in the callee, this implementation cannot
546 // work.)
547
548 // Only a few cases are covered here at the moment -- those needed
549 // by the default abi.
550 llvm::Value *Val;
551
552 if (AI.isIndirect()) {
553 assert(!AI.getPaddingType() &&
554 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
555 assert(
556 !AI.getIndirectRealign() &&
557 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
558
559 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
560 CharUnits TyAlignForABI = TyInfo.second;
561
562 llvm::Type *BaseTy =
563 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
564 llvm::Value *Addr =
565 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
566 return Address(Addr, TyAlignForABI);
567 } else {
568 assert((AI.isDirect() || AI.isExtend()) &&
569 "Unexpected ArgInfo Kind in generic VAArg emitter!");
570
571 assert(!AI.getInReg() &&
572 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
573 assert(!AI.getPaddingType() &&
574 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
575 assert(!AI.getDirectOffset() &&
576 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
577 assert(!AI.getCoerceToType() &&
578 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
579
580 Address Temp = CGF.CreateMemTemp(Ty, "varet");
581 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
582 CGF.Builder.CreateStore(Val, Temp);
583 return Temp;
584 }
585 }
586
587 /// DefaultABIInfo - The default implementation for ABI specific
588 /// details. This implementation provides information which results in
589 /// self-consistent and sensible LLVM IR generation, but does not
590 /// conform to any particular ABI.
591 class DefaultABIInfo : public ABIInfo {
592 public:
593 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
594
595 ABIArgInfo classifyReturnType(QualType RetTy) const;
596 ABIArgInfo classifyArgumentType(QualType RetTy) const;
597
598 void computeInfo(CGFunctionInfo &FI) const override {
599 if (!getCXXABI().classifyReturnType(FI))
600 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
601 for (auto &I : FI.arguments())
602 I.info = classifyArgumentType(I.type);
603 }
604
605 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
606 QualType Ty) const override {
607 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
608 }
609 };
610
611 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
612 public:
613 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
614 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
615 };
616
617 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
618 Ty = useFirstFieldIfTransparentUnion(Ty);
619
620 if (isAggregateTypeForABI(Ty)) {
621 // Records with non-trivial destructors/copy-constructors should not be
622 // passed by value.
623 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
624 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
625
626 return getNaturalAlignIndirect(Ty);
627 }
628
629 // Treat an enum type as its underlying type.
630 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
631 Ty = EnumTy->getDecl()->getIntegerType();
632
633 return (Ty->isPromotableIntegerType() ?
634 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
635 }
636
637 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
638 if (RetTy->isVoidType())
639 return ABIArgInfo::getIgnore();
640
641 if (isAggregateTypeForABI(RetTy))
642 return getNaturalAlignIndirect(RetTy);
643
644 // Treat an enum type as its underlying type.
645 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
646 RetTy = EnumTy->getDecl()->getIntegerType();
647
648 return (RetTy->isPromotableIntegerType() ?
649 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
650 }
651
652 //===----------------------------------------------------------------------===//
653 // WebAssembly ABI Implementation
654 //
655 // This is a very simple ABI that relies a lot on DefaultABIInfo.
656 //===----------------------------------------------------------------------===//
657
658 class WebAssemblyABIInfo final : public DefaultABIInfo {
659 public:
660 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
661 : DefaultABIInfo(CGT) {}
662
663 private:
664 ABIArgInfo classifyReturnType(QualType RetTy) const;
665 ABIArgInfo classifyArgumentType(QualType Ty) const;
666
667 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
668 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
669 // override them.
670 void computeInfo(CGFunctionInfo &FI) const override {
671 if (!getCXXABI().classifyReturnType(FI))
672 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
673 for (auto &Arg : FI.arguments())
674 Arg.info = classifyArgumentType(Arg.type);
675 }
676
677 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
678 QualType Ty) const override;
679 };
680
681 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
682 public:
683 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
684 : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
685 };
686
687 /// \brief Classify argument of given type \p Ty.
688 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
689 Ty = useFirstFieldIfTransparentUnion(Ty);
690
691 if (isAggregateTypeForABI(Ty)) {
692 // Records with non-trivial destructors/copy-constructors should not be
693 // passed by value.
694 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
695 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
696 // Ignore empty structs/unions.
697 if (isEmptyRecord(getContext(), Ty, true))
698 return ABIArgInfo::getIgnore();
699 // Lower single-element structs to just pass a regular value. TODO: We
700 // could do reasonable-size multiple-element structs too, using getExpand(),
701 // though watch out for things like bitfields.
702 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
703 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
704 }
705
706 // Otherwise just do the default thing.
707 return DefaultABIInfo::classifyArgumentType(Ty);
708 }
709
710 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
711 if (isAggregateTypeForABI(RetTy)) {
712 // Records with non-trivial destructors/copy-constructors should not be
713 // returned by value.
714 if (!getRecordArgABI(RetTy, getCXXABI())) {
715 // Ignore empty structs/unions.
716 if (isEmptyRecord(getContext(), RetTy, true))
717 return ABIArgInfo::getIgnore();
718 // Lower single-element structs to just return a regular value. TODO: We
719 // could do reasonable-size multiple-element structs too, using
720 // ABIArgInfo::getDirect().
721 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
722 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
723 }
724 }
725
726 // Otherwise just do the default thing.
727 return DefaultABIInfo::classifyReturnType(RetTy);
728 }
729
730 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
731 QualType Ty) const {
732 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
733 getContext().getTypeInfoInChars(Ty),
734 CharUnits::fromQuantity(4),
735 /*AllowHigherAlign=*/ true);
736 }
737
738 //===----------------------------------------------------------------------===//
739 // le32/PNaCl bitcode ABI Implementation
740 //
741 // This is a simplified version of the x86_32 ABI. Arguments and return values
742 // are always passed on the stack.
743 //===----------------------------------------------------------------------===//
744
745 class PNaClABIInfo : public ABIInfo {
746 public:
747 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
748
749 ABIArgInfo classifyReturnType(QualType RetTy) const;
750 ABIArgInfo classifyArgumentType(QualType RetTy) const;
751
752 void computeInfo(CGFunctionInfo &FI) const override;
753 Address EmitVAArg(CodeGenFunction &CGF,
754 Address VAListAddr, QualType Ty) const override;
755 };
756
757 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
758 public:
759 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
760 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
761 };
762
763 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
764 if (!getCXXABI().classifyReturnType(FI))
765 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
766
767 for (auto &I : FI.arguments())
768 I.info = classifyArgumentType(I.type);
769 }
770
771 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
772 QualType Ty) const {
773 // The PNaCL ABI is a bit odd, in that varargs don't use normal
774 // function classification. Structs get passed directly for varargs
775 // functions, through a rewriting transform in
776 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
777 // this target to actually support va_arg instructions with an
778 // aggregate type, unlike other targets.
779 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
780 }
781
782 /// \brief Classify argument of given type \p Ty.
783 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
784 if (isAggregateTypeForABI(Ty)) {
785 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
786 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
787 return getNaturalAlignIndirect(Ty);
788 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
789 // Treat an enum type as its underlying type.
790 Ty = EnumTy->getDecl()->getIntegerType();
791 } else if (Ty->isFloatingType()) {
792 // Floating-point types don't go inreg.
793 return ABIArgInfo::getDirect();
794 }
795
796 return (Ty->isPromotableIntegerType() ?
797 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
798 }
799
800 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
801 if (RetTy->isVoidType())
802 return ABIArgInfo::getIgnore();
803
804 // In the PNaCl ABI we always return records/structures on the stack.
805 if (isAggregateTypeForABI(RetTy))
806 return getNaturalAlignIndirect(RetTy);
807
808 // Treat an enum type as its underlying type.
809 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
810 RetTy = EnumTy->getDecl()->getIntegerType();
811
812 return (RetTy->isPromotableIntegerType() ?
813 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
814 }
815
816 /// IsX86_MMXType - Return true if this is an MMX type.
817 bool IsX86_MMXType(llvm::Type *IRType) {
818 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
819 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
820 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
821 IRType->getScalarSizeInBits() != 64;
822 }
823
824 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
825 StringRef Constraint,
826 llvm::Type* Ty) {
827 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
828 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
829 // Invalid MMX constraint
830 return nullptr;
831 }
832
833 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
834 }
835
836 // No operation needed
837 return Ty;
838 }
839
840 /// Returns true if this type can be passed in SSE registers with the
841 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
842 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
843 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
844 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
845 return true;
846 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
847 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
848 // registers specially.
849 unsigned VecSize = Context.getTypeSize(VT);
850 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
851 return true;
852 }
853 return false;
854 }
855
856 /// Returns true if this aggregate is small enough to be passed in SSE registers
857 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
858 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
859 return NumMembers <= 4;
860 }
861
862 //===----------------------------------------------------------------------===//
863 // X86-32 ABI Implementation
864 //===----------------------------------------------------------------------===//
865
866 /// \brief Similar to llvm::CCState, but for Clang.
867 struct CCState {
868 CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
869
870 unsigned CC;
871 unsigned FreeRegs;
872 unsigned FreeSSERegs;
873 };
874
875 /// X86_32ABIInfo - The X86-32 ABI information.
876 class X86_32ABIInfo : public SwiftABIInfo {
877 enum Class {
878 Integer,
879 Float
880 };
881
882 static const unsigned MinABIStackAlignInBytes = 4;
883
884 bool IsDarwinVectorABI;
885 bool IsRetSmallStructInRegABI;
886 bool IsWin32StructABI;
887 bool IsSoftFloatABI;
888 bool IsMCUABI;
889 unsigned DefaultNumRegisterParameters;
890
891 static bool isRegisterSize(unsigned Size) {
892 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
893 }
894
895 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
896 // FIXME: Assumes vectorcall is in use.
897 return isX86VectorTypeForVectorCall(getContext(), Ty);
898 }
899
900 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
901 uint64_t NumMembers) const override {
902 // FIXME: Assumes vectorcall is in use.
903 return isX86VectorCallAggregateSmallEnough(NumMembers);
904 }
905
906 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
907
908 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
909 /// such that the argument will be passed in memory.
910 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
911
912 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
913
914 /// \brief Return the alignment to use for the given type on the stack.
915 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
916
917 Class classify(QualType Ty) const;
918 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
919 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
920 /// \brief Updates the number of available free registers, returns
921 /// true if any registers were allocated.
922 bool updateFreeRegs(QualType Ty, CCState &State) const;
923
924 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
925 bool &NeedsPadding) const;
926 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
927
928 bool canExpandIndirectArgument(QualType Ty) const;
929
930 /// \brief Rewrite the function info so that all memory arguments use
931 /// inalloca.
932 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
933
934 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
935 CharUnits &StackOffset, ABIArgInfo &Info,
936 QualType Type) const;
937
938 public:
939
940 void computeInfo(CGFunctionInfo &FI) const override;
941 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
942 QualType Ty) const override;
943
944 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
945 bool RetSmallStructInRegABI, bool Win32StructABI,
946 unsigned NumRegisterParameters, bool SoftFloatABI)
947 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
948 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
949 IsWin32StructABI(Win32StructABI),
950 IsSoftFloatABI(SoftFloatABI),
951 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
952 DefaultNumRegisterParameters(NumRegisterParameters) {}
953
954 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
955 ArrayRef<llvm::Type*> scalars,
956 bool asReturnValue) const override {
957 // LLVM's x86-32 lowering currently only assigns up to three
958 // integer registers and three fp registers. Oddly, it'll use up to
959 // four vector registers for vectors, but those can overlap with the
960 // scalar registers.
961 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
962 }
963 };
964
965 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
966 public:
967 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
968 bool RetSmallStructInRegABI, bool Win32StructABI,
969 unsigned NumRegisterParameters, bool SoftFloatABI)
970 : TargetCodeGenInfo(new X86_32ABIInfo(
971 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
972 NumRegisterParameters, SoftFloatABI)) {}
973
974 static bool isStructReturnInRegABI(
975 const llvm::Triple &Triple, const CodeGenOptions &Opts);
976
977 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
978 CodeGen::CodeGenModule &CGM) const override;
979
980 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
981 // Darwin uses different dwarf register numbers for EH.
982 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
983 return 4;
984 }
985
986 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
987 llvm::Value *Address) const override;
988
989 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
990 StringRef Constraint,
991 llvm::Type* Ty) const override {
992 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
993 }
994
995 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
996 std::string &Constraints,
997 std::vector<llvm::Type *> &ResultRegTypes,
998 std::vector<llvm::Type *> &ResultTruncRegTypes,
999 std::vector<LValue> &ResultRegDests,
1000 std::string &AsmString,
1001 unsigned NumOutputs) const override;
1002
1003 llvm::Constant *
1004 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1005 unsigned Sig = (0xeb << 0) | // jmp rel8
1006 (0x06 << 8) | // .+0x08
1007 ('F' << 16) |
1008 ('T' << 24);
1009 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1010 }
1011
1012 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1013 return "movl\t%ebp, %ebp"
1014 "\t\t## marker for objc_retainAutoreleaseReturnValue";
1015 }
1016 };
1017
1018 }
1019
1020 /// Rewrite input constraint references after adding some output constraints.
1021 /// In the case where there is one output and one input and we add one output,
1022 /// we need to replace all operand references greater than or equal to 1:
1023 /// mov $0, $1
1024 /// mov eax, $1
1025 /// The result will be:
1026 /// mov $0, $2
1027 /// mov eax, $2
1028 static void rewriteInputConstraintReferences(unsigned FirstIn,
1029 unsigned NumNewOuts,
1030 std::string &AsmString) {
1031 std::string Buf;
1032 llvm::raw_string_ostream OS(Buf);
1033 size_t Pos = 0;
1034 while (Pos < AsmString.size()) {
1035 size_t DollarStart = AsmString.find('$', Pos);
1036 if (DollarStart == std::string::npos)
1037 DollarStart = AsmString.size();
1038 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1039 if (DollarEnd == std::string::npos)
1040 DollarEnd = AsmString.size();
1041 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1042 Pos = DollarEnd;
1043 size_t NumDollars = DollarEnd - DollarStart;
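    // Descriptive note (added): an odd number of '$' characters means the
    // following digits form an operand reference; an even number is an
    // escaped literal '$'.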
1044 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1045 // We have an operand reference.
1046 size_t DigitStart = Pos;
1047 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1048 if (DigitEnd == std::string::npos)
1049 DigitEnd = AsmString.size();
1050 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1051 unsigned OperandIndex;
1052 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1053 if (OperandIndex >= FirstIn)
1054 OperandIndex += NumNewOuts;
1055 OS << OperandIndex;
1056 } else {
1057 OS << OperandStr;
1058 }
1059 Pos = DigitEnd;
1060 }
1061 }
1062 AsmString = std::move(OS.str());
1063 }
1064
1065 /// Add output constraints for EAX:EDX because they are return registers.
1066 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1067 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1068 std::vector<llvm::Type *> &ResultRegTypes,
1069 std::vector<llvm::Type *> &ResultTruncRegTypes,
1070 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1071 unsigned NumOutputs) const {
1072 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1073
1074 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1075 // larger.
1076 if (!Constraints.empty())
1077 Constraints += ',';
1078 if (RetWidth <= 32) {
1079 Constraints += "={eax}";
1080 ResultRegTypes.push_back(CGF.Int32Ty);
1081 } else {
1082 // Use the 'A' constraint for EAX:EDX.
1083 Constraints += "=A";
1084 ResultRegTypes.push_back(CGF.Int64Ty);
1085 }
1086
1087 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1088 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1089 ResultTruncRegTypes.push_back(CoerceTy);
1090
1091 // Coerce the integer by bitcasting the return slot pointer.
1092 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
1093 CoerceTy->getPointerTo()));
1094 ResultRegDests.push_back(ReturnSlot);
1095
1096 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1097 }
1098
1099 /// shouldReturnTypeInRegister - Determine if the given type should be
1100 /// returned in a register (for the Darwin and MCU ABI).
1101 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1102 ASTContext &Context) const {
1103 uint64_t Size = Context.getTypeSize(Ty);
1104
1105 // For i386, type must be register sized.
1106 // For the MCU ABI, it only needs to be <= 8 bytes.
1107 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1108 return false;
1109
1110 if (Ty->isVectorType()) {
1111 // 64- and 128- bit vectors inside structures are not returned in
1112 // registers.
1113 if (Size == 64 || Size == 128)
1114 return false;
1115
1116 return true;
1117 }
1118
1119 // If this is a builtin, pointer, enum, complex type, member pointer, or
1120 // member function pointer it is ok.
1121 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1122 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1123 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1124 return true;
1125
1126 // Arrays are treated like records.
1127 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1128 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1129
1130 // Otherwise, it must be a record type.
1131 const RecordType *RT = Ty->getAs<RecordType>();
1132 if (!RT) return false;
1133
1134 // FIXME: Traverse bases here too.
1135
1136 // Structure types are passed in register if all fields would be
1137 // passed in a register.
1138 for (const auto *FD : RT->getDecl()->fields()) {
1139 // Empty fields are ignored.
1140 if (isEmptyField(Context, FD, true))
1141 continue;
1142
1143 // Check fields recursively.
1144 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1145 return false;
1146 }
1147 return true;
1148 }
1149
1150 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1151 // Treat complex types as the element type.
1152 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1153 Ty = CTy->getElementType();
1154
1155 // Check for a type which we know has a simple scalar argument-passing
1156 // convention without any padding. (We're specifically looking for 32
1157 // and 64-bit integer and integer-equivalents, float, and double.)
1158 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1159 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1160 return false;
1161
1162 uint64_t Size = Context.getTypeSize(Ty);
1163 return Size == 32 || Size == 64;
1164 }
1165
1166 /// Test whether an argument type which is to be passed indirectly (on the
1167 /// stack) would have the equivalent layout if it was expanded into separate
1168 /// arguments. If so, we prefer to do the latter to avoid inhibiting
1169 /// optimizations.
1170 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1171 // We can only expand structure types.
1172 const RecordType *RT = Ty->getAs<RecordType>();
1173 if (!RT)
1174 return false;
1175 const RecordDecl *RD = RT->getDecl();
1176 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1177 if (!IsWin32StructABI ) {
1178 // On non-Windows, we have to conservatively match our old bitcode
1179 // prototypes in order to be ABI-compatible at the bitcode level.
1180 if (!CXXRD->isCLike())
1181 return false;
1182 } else {
1183 // Don't do this for dynamic classes.
1184 if (CXXRD->isDynamicClass())
1185 return false;
1186 // Don't do this if there are any non-empty bases.
1187 for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
1188 if (!isEmptyRecord(getContext(), Base.getType(), /*AllowArrays=*/true))
1189 return false;
1190 }
1191 }
1192 }
1193
1194 uint64_t Size = 0;
1195
1196 for (const auto *FD : RD->fields()) {
1197 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1198 // argument is smaller than 32-bits, expanding the struct will create
1199 // alignment padding.
1200 if (!is32Or64BitBasicType(FD->getType(), getContext()))
1201 return false;
1202
1203 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1204 // how to expand them yet, and the predicate for telling if a bitfield still
1205 // counts as "basic" is more complicated than what we were doing previously.
1206 if (FD->isBitField())
1207 return false;
1208
1209 Size += getContext().getTypeSize(FD->getType());
1210 }
1211
1212 // We can do this if there was no alignment padding.
1213 return Size == getContext().getTypeSize(Ty);
1214 }
1215
1216 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1217 // If the return value is indirect, then the hidden argument is consuming one
1218 // integer register.
1219 if (State.FreeRegs) {
1220 --State.FreeRegs;
1221 if (!IsMCUABI)
1222 return getNaturalAlignIndirectInReg(RetTy);
1223 }
1224 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1225 }
1226
1227 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1228 CCState &State) const {
1229 if (RetTy->isVoidType())
1230 return ABIArgInfo::getIgnore();
1231
1232 const Type *Base = nullptr;
1233 uint64_t NumElts = 0;
1234 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1235 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1236 // The LLVM struct type for such an aggregate should lower properly.
1237 return ABIArgInfo::getDirect();
1238 }
1239
1240 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1241 // On Darwin, some vectors are returned in registers.
1242 if (IsDarwinVectorABI) {
1243 uint64_t Size = getContext().getTypeSize(RetTy);
1244
1245 // 128-bit vectors are a special case; they are returned in
1246 // registers and we need to make sure to pick a type the LLVM
1247 // backend will like.
1248 if (Size == 128)
1249 return ABIArgInfo::getDirect(llvm::VectorType::get(
1250 llvm::Type::getInt64Ty(getVMContext()), 2));
1251
1252 // Always return in register if it fits in a general purpose
1253 // register, or if it is 64 bits and has a single element.
1254 if ((Size == 8 || Size == 16 || Size == 32) ||
1255 (Size == 64 && VT->getNumElements() == 1))
1256 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1257 Size));
1258
1259 return getIndirectReturnResult(RetTy, State);
1260 }
1261
1262 return ABIArgInfo::getDirect();
1263 }
1264
1265 if (isAggregateTypeForABI(RetTy)) {
1266 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1267 // Structures with flexible arrays are always indirect.
1268 if (RT->getDecl()->hasFlexibleArrayMember())
1269 return getIndirectReturnResult(RetTy, State);
1270 }
1271
1272 // If specified, structs and unions are always indirect.
1273 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1274 return getIndirectReturnResult(RetTy, State);
1275
1276 // Ignore empty structs/unions.
1277 if (isEmptyRecord(getContext(), RetTy, true))
1278 return ABIArgInfo::getIgnore();
1279
1280 // Small structures which are register sized are generally returned
1281 // in a register.
1282 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1283 uint64_t Size = getContext().getTypeSize(RetTy);
1284
1285 // As a special-case, if the struct is a "single-element" struct, and
1286 // the field is of type "float" or "double", return it in a
1287 // floating-point register. (MSVC does not apply this special case.)
1288 // We apply a similar transformation for pointer types to improve the
1289 // quality of the generated IR.
1290 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1291 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1292 || SeltTy->hasPointerRepresentation())
1293 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1294
1295 // FIXME: We should be able to narrow this integer in cases with dead
1296 // padding.
1297 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1298 }
1299
1300 return getIndirectReturnResult(RetTy, State);
1301 }
1302
1303 // Treat an enum type as its underlying type.
1304 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1305 RetTy = EnumTy->getDecl()->getIntegerType();
1306
1307 return (RetTy->isPromotableIntegerType() ?
1308 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1309 }
1310
1311 static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
1312 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1313 }
1314
1315 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
1316 const RecordType *RT = Ty->getAs<RecordType>();
1317 if (!RT)
1318 return false;
1319 const RecordDecl *RD = RT->getDecl();
1320
1321 // If this is a C++ record, check the bases first.
1322 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1323 for (const auto &I : CXXRD->bases())
1324 if (!isRecordWithSSEVectorType(Context, I.getType()))
1325 return false;
1326
1327 for (const auto *i : RD->fields()) {
1328 QualType FT = i->getType();
1329
1330 if (isSSEVectorType(Context, FT))
1331 return true;
1332
1333 if (isRecordWithSSEVectorType(Context, FT))
1334 return true;
1335 }
1336
1337 return false;
1338 }
1339
1340 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1341 unsigned Align) const {
1342 // Otherwise, if the alignment is less than or equal to the minimum ABI
1343 // alignment, just use the default; the backend will handle this.
1344 if (Align <= MinABIStackAlignInBytes)
1345 return 0; // Use default alignment.
1346
1347 // On non-Darwin, the stack type alignment is always 4.
1348 if (!IsDarwinVectorABI) {
1349 // Set explicit alignment, since we may need to realign the top.
1350 return MinABIStackAlignInBytes;
1351 }
1352
1353 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1354 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
1355 isRecordWithSSEVectorType(getContext(), Ty)))
1356 return 16;
1357
1358 return MinABIStackAlignInBytes;
1359 }
1360
1361 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1362 CCState &State) const {
1363 if (!ByVal) {
1364 if (State.FreeRegs) {
1365 --State.FreeRegs; // Non-byval indirects just use one pointer.
1366 if (!IsMCUABI)
1367 return getNaturalAlignIndirectInReg(Ty);
1368 }
1369 return getNaturalAlignIndirect(Ty, false);
1370 }
1371
1372 // Compute the byval alignment.
1373 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1374 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1375 if (StackAlign == 0)
1376 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1377
1378 // If the stack alignment is less than the type alignment, realign the
1379 // argument.
1380 bool Realign = TypeAlign > StackAlign;
1381 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1382 /*ByVal=*/true, Realign);
1383 }
1384
1385 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1386 const Type *T = isSingleElementStruct(Ty, getContext());
1387 if (!T)
1388 T = Ty.getTypePtr();
1389
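 // Descriptive note (added): only plain 'float' and 'double' classify as
 // Float; everything else, including long double and vector types, falls
 // into Integer.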
1390 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1391 BuiltinType::Kind K = BT->getKind();
1392 if (K == BuiltinType::Float || K == BuiltinType::Double)
1393 return Float;
1394 }
1395 return Integer;
1396 }
1397
updateFreeRegs(QualType Ty,CCState & State) const1398 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1399 if (!IsSoftFloatABI) {
1400 Class C = classify(Ty);
1401 if (C == Float)
1402 return false;
1403 }
1404
1405 unsigned Size = getContext().getTypeSize(Ty);
1406 unsigned SizeInRegs = (Size + 31) / 32;
1407
1408 if (SizeInRegs == 0)
1409 return false;
1410
1411 if (!IsMCUABI) {
1412 if (SizeInRegs > State.FreeRegs) {
1413 State.FreeRegs = 0;
1414 return false;
1415 }
1416 } else {
1417 // The MCU psABI allows passing parameters in-reg even if there are
1418 // earlier parameters that are passed on the stack. Also,
1419 // it does not allow passing >8-byte structs in-register,
1420 // even if there are 3 free registers available.
1421 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1422 return false;
1423 }
1424
1425 State.FreeRegs -= SizeInRegs;
1426 return true;
1427 }
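// Worked example (register counts assumed): under the MCU ABI with
// State.FreeRegs == 3, an 8-byte struct needs SizeInRegs == 2 and is accepted
// (FreeRegs drops to 1), while a 12-byte struct (SizeInRegs == 3) fails the
// "> 2" check above even though three registers are still free.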
1428
shouldAggregateUseDirect(QualType Ty,CCState & State,bool & InReg,bool & NeedsPadding) const1429 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1430 bool &InReg,
1431 bool &NeedsPadding) const {
1432 // On Windows, aggregates other than HFAs are never passed in registers, and
1433 // they do not consume register slots. Homogeneous floating-point aggregates
1434 // (HFAs) have already been dealt with at this point.
1435 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1436 return false;
1437
1438 NeedsPadding = false;
1439 InReg = !IsMCUABI;
1440
1441 if (!updateFreeRegs(Ty, State))
1442 return false;
1443
1444 if (IsMCUABI)
1445 return true;
1446
1447 if (State.CC == llvm::CallingConv::X86_FastCall ||
1448 State.CC == llvm::CallingConv::X86_VectorCall) {
1449 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1450 NeedsPadding = true;
1451
1452 return false;
1453 }
1454
1455 return true;
1456 }
1457
shouldPrimitiveUseInReg(QualType Ty,CCState & State) const1458 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1459 if (!updateFreeRegs(Ty, State))
1460 return false;
1461
1462 if (IsMCUABI)
1463 return false;
1464
1465 if (State.CC == llvm::CallingConv::X86_FastCall ||
1466 State.CC == llvm::CallingConv::X86_VectorCall) {
1467 if (getContext().getTypeSize(Ty) > 32)
1468 return false;
1469
1470 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1471 Ty->isReferenceType());
1472 }
1473
1474 return true;
1475 }
1476
classifyArgumentType(QualType Ty,CCState & State) const1477 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1478 CCState &State) const {
1479 // FIXME: Set alignment on indirect arguments.
1480
1481 Ty = useFirstFieldIfTransparentUnion(Ty);
1482
1483 // Check with the C++ ABI first.
1484 const RecordType *RT = Ty->getAs<RecordType>();
1485 if (RT) {
1486 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1487 if (RAA == CGCXXABI::RAA_Indirect) {
1488 return getIndirectResult(Ty, false, State);
1489 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1490 // The field index doesn't matter, we'll fix it up later.
1491 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1492 }
1493 }
1494
1495 // vectorcall adds the concept of a homogeneous vector aggregate, similar
1496 // to other targets.
1497 const Type *Base = nullptr;
1498 uint64_t NumElts = 0;
1499 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1500 isHomogeneousAggregate(Ty, Base, NumElts)) {
1501 if (State.FreeSSERegs >= NumElts) {
1502 State.FreeSSERegs -= NumElts;
1503 if (Ty->isBuiltinType() || Ty->isVectorType())
1504 return ABIArgInfo::getDirect();
1505 return ABIArgInfo::getExpand();
1506 }
1507 return getIndirectResult(Ty, /*ByVal=*/false, State);
1508 }
1509
1510 if (isAggregateTypeForABI(Ty)) {
1511 // Structures with flexible arrays are always indirect.
1512 // FIXME: This should not be byval!
1513 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1514 return getIndirectResult(Ty, true, State);
1515
1516 // Ignore empty structs/unions on non-Windows.
1517 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1518 return ABIArgInfo::getIgnore();
1519
1520 llvm::LLVMContext &LLVMContext = getVMContext();
1521 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1522 bool NeedsPadding = false;
1523 bool InReg;
1524 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1525 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1526 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1527 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1528 if (InReg)
1529 return ABIArgInfo::getDirectInReg(Result);
1530 else
1531 return ABIArgInfo::getDirect(Result);
1532 }
1533 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1534
1535 // Expand small (<= 128-bit) record types when we know that the stack layout
1536 // of those arguments will match the struct. This is important because the
1537 // LLVM backend isn't smart enough to remove byval, which inhibits many
1538 // optimizations.
1539 // Don't do this for the MCU if there are still free integer registers
1540 // (see X86_64 ABI for full explanation).
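    // Example (illustrative, assuming no integer argument registers are left):
    // a struct of two ints is 64 bits, so it is expanded into two separate i32
    // arguments whose stack layout matches the struct, instead of going byval.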
1541 if (getContext().getTypeSize(Ty) <= 4 * 32 &&
1542 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1543 return ABIArgInfo::getExpandWithPadding(
1544 State.CC == llvm::CallingConv::X86_FastCall ||
1545 State.CC == llvm::CallingConv::X86_VectorCall,
1546 PaddingType);
1547
1548 return getIndirectResult(Ty, true, State);
1549 }
1550
1551 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1552 // On Darwin, some vectors are passed in memory; we handle this by passing
1553 // them as an i8/i16/i32/i64.
1554 if (IsDarwinVectorABI) {
1555 uint64_t Size = getContext().getTypeSize(Ty);
1556 if ((Size == 8 || Size == 16 || Size == 32) ||
1557 (Size == 64 && VT->getNumElements() == 1))
1558 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1559 Size));
1560 }
1561
1562 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1563 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1564
1565 return ABIArgInfo::getDirect();
1566 }
1567
1568
1569 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1570 Ty = EnumTy->getDecl()->getIntegerType();
1571
1572 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1573
1574 if (Ty->isPromotableIntegerType()) {
1575 if (InReg)
1576 return ABIArgInfo::getExtendInReg();
1577 return ABIArgInfo::getExtend();
1578 }
1579
1580 if (InReg)
1581 return ABIArgInfo::getDirectInReg();
1582 return ABIArgInfo::getDirect();
1583 }
1584
computeInfo(CGFunctionInfo & FI) const1585 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1586 CCState State(FI.getCallingConvention());
1587 if (IsMCUABI)
1588 State.FreeRegs = 3;
1589 else if (State.CC == llvm::CallingConv::X86_FastCall)
1590 State.FreeRegs = 2;
1591 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1592 State.FreeRegs = 2;
1593 State.FreeSSERegs = 6;
1594 } else if (FI.getHasRegParm())
1595 State.FreeRegs = FI.getRegParm();
1596 else
1597 State.FreeRegs = DefaultNumRegisterParameters;
1598
1599 if (!getCXXABI().classifyReturnType(FI)) {
1600 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1601 } else if (FI.getReturnInfo().isIndirect()) {
1602 // The C++ ABI is not aware of register usage, so we have to check if the
1603 // return value was sret and put it in a register ourselves if appropriate.
1604 if (State.FreeRegs) {
1605 --State.FreeRegs; // The sret parameter consumes a register.
1606 if (!IsMCUABI)
1607 FI.getReturnInfo().setInReg(true);
1608 }
1609 }
1610
1611 // The chain argument effectively gives us another free register.
1612 if (FI.isChainCall())
1613 ++State.FreeRegs;
1614
1615 bool UsedInAlloca = false;
1616 for (auto &I : FI.arguments()) {
1617 I.info = classifyArgumentType(I.type, State);
1618 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1619 }
1620
1621 // If we needed to use inalloca for any argument, do a second pass and rewrite
1622 // all the memory arguments to use inalloca.
1623 if (UsedInAlloca)
1624 rewriteWithInAlloca(FI);
1625 }
1626
1627 void
addFieldToArgStruct(SmallVector<llvm::Type *,6> & FrameFields,CharUnits & StackOffset,ABIArgInfo & Info,QualType Type) const1628 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1629 CharUnits &StackOffset, ABIArgInfo &Info,
1630 QualType Type) const {
1631 // Arguments are always 4-byte-aligned.
1632 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1633
1634 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1635 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1636 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1637 StackOffset += getContext().getTypeSizeInChars(Type);
1638
1639 // Insert padding bytes to respect alignment.
1640 CharUnits FieldEnd = StackOffset;
1641 StackOffset = FieldEnd.alignTo(FieldAlign);
1642 if (StackOffset != FieldEnd) {
1643 CharUnits NumBytes = StackOffset - FieldEnd;
1644 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1645 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1646 FrameFields.push_back(Ty);
1647 }
1648 }
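// Sketch of the resulting frame layout (field types assumed): for successive
// fields of type i32, i8 and double this produces { i32, i8, [3 x i8], double },
// with the [3 x i8] padding array appended here to restore the 4-byte field
// alignment before the next argument.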
1649
isArgInAlloca(const ABIArgInfo & Info)1650 static bool isArgInAlloca(const ABIArgInfo &Info) {
1651 // Leave ignored and inreg arguments alone.
1652 switch (Info.getKind()) {
1653 case ABIArgInfo::InAlloca:
1654 return true;
1655 case ABIArgInfo::Indirect:
1656 assert(Info.getIndirectByVal());
1657 return true;
1658 case ABIArgInfo::Ignore:
1659 return false;
1660 case ABIArgInfo::Direct:
1661 case ABIArgInfo::Extend:
1662 if (Info.getInReg())
1663 return false;
1664 return true;
1665 case ABIArgInfo::Expand:
1666 case ABIArgInfo::CoerceAndExpand:
1667 // These are aggregate types which are never passed in registers when
1668 // inalloca is involved.
1669 return true;
1670 }
1671 llvm_unreachable("invalid enum");
1672 }
1673
rewriteWithInAlloca(CGFunctionInfo & FI) const1674 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1675 assert(IsWin32StructABI && "inalloca only supported on win32");
1676
1677 // Build a packed struct type for all of the arguments in memory.
1678 SmallVector<llvm::Type *, 6> FrameFields;
1679
1680 // The stack alignment is always 4.
1681 CharUnits StackAlign = CharUnits::fromQuantity(4);
1682
1683 CharUnits StackOffset;
1684 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1685
1686 // Put 'this' into the struct before 'sret', if necessary.
1687 bool IsThisCall =
1688 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1689 ABIArgInfo &Ret = FI.getReturnInfo();
1690 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1691 isArgInAlloca(I->info)) {
1692 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1693 ++I;
1694 }
1695
1696 // Put the sret parameter into the inalloca struct if it's in memory.
1697 if (Ret.isIndirect() && !Ret.getInReg()) {
1698 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1699 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1700 // On Windows, the hidden sret parameter is always returned in eax.
1701 Ret.setInAllocaSRet(IsWin32StructABI);
1702 }
1703
1704 // Skip the 'this' parameter in ecx.
1705 if (IsThisCall)
1706 ++I;
1707
1708 // Put arguments passed in memory into the struct.
1709 for (; I != E; ++I) {
1710 if (isArgInAlloca(I->info))
1711 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1712 }
1713
1714 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1715 /*isPacked=*/true),
1716 StackAlign);
1717 }
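// Illustrative outcome (hypothetical signature): for a __thiscall method that
// returns a struct via sret and takes one struct parameter, 'this' stays in
// ECX, the sret pointer becomes the first field of the packed inalloca frame,
// and the struct parameter follows it; the frame alignment stays at 4.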
1718
EmitVAArg(CodeGenFunction & CGF,Address VAListAddr,QualType Ty) const1719 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1720 Address VAListAddr, QualType Ty) const {
1721
1722 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1723
1724 // x86-32 changes the alignment of certain arguments on the stack.
1725 //
1726 // Just messing with TypeInfo like this works because we never pass
1727 // anything indirectly.
1728 TypeInfo.second = CharUnits::fromQuantity(
1729 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1730
1731 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1732 TypeInfo, CharUnits::fromQuantity(4),
1733 /*AllowHigherAlign*/ true);
1734 }
1735
isStructReturnInRegABI(const llvm::Triple & Triple,const CodeGenOptions & Opts)1736 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1737 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1738 assert(Triple.getArch() == llvm::Triple::x86);
1739
1740 switch (Opts.getStructReturnConvention()) {
1741 case CodeGenOptions::SRCK_Default:
1742 break;
1743 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1744 return false;
1745 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1746 return true;
1747 }
1748
1749 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1750 return true;
1751
1752 switch (Triple.getOS()) {
1753 case llvm::Triple::DragonFly:
1754 case llvm::Triple::FreeBSD:
1755 case llvm::Triple::OpenBSD:
1756 case llvm::Triple::Bitrig:
1757 case llvm::Triple::Win32:
1758 return true;
1759 default:
1760 return false;
1761 }
1762 }
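// Net effect (illustrative): by default this returns false for a target such
// as i686-pc-linux-gnu (small structs are returned through a hidden sret
// pointer) and true for Darwin, Windows and IAMCU; -fpcc-struct-return and
// -freg-struct-return override the per-OS default either way.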
1763
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const1764 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
1765 llvm::GlobalValue *GV,
1766 CodeGen::CodeGenModule &CGM) const {
1767 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1768 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1769 // Get the LLVM function.
1770 llvm::Function *Fn = cast<llvm::Function>(GV);
1771
1772 // Now add the 'alignstack' attribute with a value of 16.
1773 llvm::AttrBuilder B;
1774 B.addStackAlignmentAttr(16);
1775 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1776 llvm::AttributeSet::get(CGM.getLLVMContext(),
1777 llvm::AttributeSet::FunctionIndex,
1778 B));
1779 }
1780 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1781 llvm::Function *Fn = cast<llvm::Function>(GV);
1782 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1783 }
1784 }
1785 }
1786
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const1787 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1788 CodeGen::CodeGenFunction &CGF,
1789 llvm::Value *Address) const {
1790 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1791
1792 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1793
1794 // 0-7 are the eight integer registers; the order is different
1795 // on Darwin (for EH), but the range is the same.
1796 // 8 is %eip.
1797 AssignToArrayRange(Builder, Address, Four8, 0, 8);
1798
1799 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1800 // 12-16 are st(0..4). Not sure why we stop at 4.
1801 // These have size 16, which is sizeof(long double) on
1802 // platforms with 8-byte alignment for that type.
1803 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1804 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1805
1806 } else {
1807 // 9 is %eflags, which doesn't get a size on Darwin for some
1808 // reason.
1809 Builder.CreateAlignedStore(
1810 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
1811 CharUnits::One());
1812
1813 // 11-16 are st(0..5). Not sure why we stop at 5.
1814 // These have size 12, which is sizeof(long double) on
1815 // platforms with 4-byte alignment for that type.
1816 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1817 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1818 }
1819
1820 return false;
1821 }
1822
1823 //===----------------------------------------------------------------------===//
1824 // X86-64 ABI Implementation
1825 //===----------------------------------------------------------------------===//
1826
1827
1828 namespace {
1829 /// The AVX ABI level for X86 targets.
1830 enum class X86AVXABILevel {
1831 None,
1832 AVX,
1833 AVX512
1834 };
1835
1836 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel)1837 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
1838 switch (AVXLevel) {
1839 case X86AVXABILevel::AVX512:
1840 return 512;
1841 case X86AVXABILevel::AVX:
1842 return 256;
1843 case X86AVXABILevel::None:
1844 return 128;
1845 }
1846 llvm_unreachable("Unknown AVXLevel");
1847 }
1848
1849 /// X86_64ABIInfo - The X86_64 ABI information.
1850 class X86_64ABIInfo : public SwiftABIInfo {
1851 enum Class {
1852 Integer = 0,
1853 SSE,
1854 SSEUp,
1855 X87,
1856 X87Up,
1857 ComplexX87,
1858 NoClass,
1859 Memory
1860 };
1861
1862 /// merge - Implement the X86_64 ABI merging algorithm.
1863 ///
1864 /// Merge an accumulating classification \arg Accum with a field
1865 /// classification \arg Field.
1866 ///
1867 /// \param Accum - The accumulating classification. This should
1868 /// always be either NoClass or the result of a previous merge
1869 /// call. In addition, this should never be Memory (the caller
1870 /// should just return Memory for the aggregate).
1871 static Class merge(Class Accum, Class Field);
1872
1873 /// postMerge - Implement the X86_64 ABI post merging algorithm.
1874 ///
1875 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1876 /// final MEMORY or SSE classes when necessary.
1877 ///
1878 /// \param AggregateSize - The size of the current aggregate in
1879 /// the classification process.
1880 ///
1881 /// \param Lo - The classification for the parts of the type
1882 /// residing in the low word of the containing object.
1883 ///
1884 /// \param Hi - The classification for the parts of the type
1885 /// residing in the higher words of the containing object.
1886 ///
1887 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1888
1889 /// classify - Determine the x86_64 register classes in which the
1890 /// given type T should be passed.
1891 ///
1892 /// \param Lo - The classification for the parts of the type
1893 /// residing in the low word of the containing object.
1894 ///
1895 /// \param Hi - The classification for the parts of the type
1896 /// residing in the high word of the containing object.
1897 ///
1898 /// \param OffsetBase - The bit offset of this type in the
1899 /// containing object. Some parameters are classified differently
1900 /// depending on whether they straddle an eightbyte boundary.
1901 ///
1902 /// \param isNamedArg - Whether the argument in question is a "named"
1903 /// argument, as used in AMD64-ABI 3.5.7.
1904 ///
1905 /// If a word is unused its result will be NoClass; if a type should
1906 /// be passed in Memory then at least the classification of \arg Lo
1907 /// will be Memory.
1908 ///
1909 /// The \arg Lo class will be NoClass iff the argument is ignored.
1910 ///
1911 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1912 /// also be ComplexX87.
1913 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1914 bool isNamedArg) const;
1915
1916 llvm::Type *GetByteVectorType(QualType Ty) const;
1917 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1918 unsigned IROffset, QualType SourceTy,
1919 unsigned SourceOffset) const;
1920 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1921 unsigned IROffset, QualType SourceTy,
1922 unsigned SourceOffset) const;
1923
1924 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable result
1925 /// such that the argument will be returned in memory.
1926 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1927
1928 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
1929 /// such that the argument will be passed in memory.
1930 ///
1931 /// \param freeIntRegs - The number of free integer registers remaining
1932 /// available.
1933 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1934
1935 ABIArgInfo classifyReturnType(QualType RetTy) const;
1936
1937 ABIArgInfo classifyArgumentType(QualType Ty,
1938 unsigned freeIntRegs,
1939 unsigned &neededInt,
1940 unsigned &neededSSE,
1941 bool isNamedArg) const;
1942
1943 bool IsIllegalVectorType(QualType Ty) const;
1944
1945 /// The 0.98 ABI revision clarified a lot of ambiguities,
1946 /// unfortunately in ways that were not always consistent with
1947 /// certain previous compilers. In particular, platforms which
1948 /// required strict binary compatibility with older versions of GCC
1949 /// may need to exempt themselves.
honorsRevision0_98() const1950 bool honorsRevision0_98() const {
1951 return !getTarget().getTriple().isOSDarwin();
1952 }
1953
1954 /// GCC classifies <1 x long long> as SSE but compatibility with older clang
1955 /// compilers requires us to classify it as INTEGER.
classifyIntegerMMXAsSSE() const1956 bool classifyIntegerMMXAsSSE() const {
1957 const llvm::Triple &Triple = getTarget().getTriple();
1958 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
1959 return false;
1960 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
1961 return false;
1962 return true;
1963 }
1964
1965 X86AVXABILevel AVXLevel;
1966 // Some ABIs (e.g. the X32 ABI and Native Client OS) use 32-bit pointers on
1967 // 64-bit hardware.
1968 bool Has64BitPointers;
1969
1970 public:
X86_64ABIInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)1971 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
1972 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
1973 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1974 }
1975
isPassedUsingAVXType(QualType type) const1976 bool isPassedUsingAVXType(QualType type) const {
1977 unsigned neededInt, neededSSE;
1978 // The freeIntRegs argument doesn't matter here.
1979 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1980 /*isNamedArg*/true);
1981 if (info.isDirect()) {
1982 llvm::Type *ty = info.getCoerceToType();
1983 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1984 return (vectorTy->getBitWidth() > 128);
1985 }
1986 return false;
1987 }
1988
1989 void computeInfo(CGFunctionInfo &FI) const override;
1990
1991 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1992 QualType Ty) const override;
1993 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
1994 QualType Ty) const override;
1995
has64BitPointers() const1996 bool has64BitPointers() const {
1997 return Has64BitPointers;
1998 }
1999
shouldPassIndirectlyForSwift(CharUnits totalSize,ArrayRef<llvm::Type * > scalars,bool asReturnValue) const2000 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2001 ArrayRef<llvm::Type*> scalars,
2002 bool asReturnValue) const override {
2003 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2004 }
2005 };
2006
2007 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2008 class WinX86_64ABIInfo : public ABIInfo {
2009 public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes & CGT)2010 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2011 : ABIInfo(CGT),
2012 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2013
2014 void computeInfo(CGFunctionInfo &FI) const override;
2015
2016 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2017 QualType Ty) const override;
2018
isHomogeneousAggregateBaseType(QualType Ty) const2019 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2020 // FIXME: Assumes vectorcall is in use.
2021 return isX86VectorTypeForVectorCall(getContext(), Ty);
2022 }
2023
isHomogeneousAggregateSmallEnough(const Type * Ty,uint64_t NumMembers) const2024 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2025 uint64_t NumMembers) const override {
2026 // FIXME: Assumes vectorcall is in use.
2027 return isX86VectorCallAggregateSmallEnough(NumMembers);
2028 }
2029
2030 private:
2031 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
2032 bool IsReturnType) const;
2033
2034 bool IsMingw64;
2035 };
2036
2037 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2038 public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)2039 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2040 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2041
getABIInfo() const2042 const X86_64ABIInfo &getABIInfo() const {
2043 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2044 }
2045
getDwarfEHStackPointer(CodeGen::CodeGenModule & CGM) const2046 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2047 return 7;
2048 }
2049
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const2050 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2051 llvm::Value *Address) const override {
2052 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2053
2054 // 0-15 are the 16 integer registers.
2055 // 16 is %rip.
2056 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2057 return false;
2058 }
2059
adjustInlineAsmType(CodeGen::CodeGenFunction & CGF,StringRef Constraint,llvm::Type * Ty) const2060 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2061 StringRef Constraint,
2062 llvm::Type* Ty) const override {
2063 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2064 }
2065
isNoProtoCallVariadic(const CallArgList & args,const FunctionNoProtoType * fnType) const2066 bool isNoProtoCallVariadic(const CallArgList &args,
2067 const FunctionNoProtoType *fnType) const override {
2068 // The default CC on x86-64 sets %al to the number of SSE
2069 // registers used, and GCC sets this when calling an unprototyped
2070 // function, so we override the default behavior. However, don't do
2071 // that when AVX types are involved: the ABI explicitly states it is
2072 // undefined, and it doesn't work in practice because of how the ABI
2073 // defines varargs anyway.
2074 if (fnType->getCallConv() == CC_C) {
2075 bool HasAVXType = false;
2076 for (CallArgList::const_iterator
2077 it = args.begin(), ie = args.end(); it != ie; ++it) {
2078 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2079 HasAVXType = true;
2080 break;
2081 }
2082 }
2083
2084 if (!HasAVXType)
2085 return true;
2086 }
2087
2088 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2089 }
2090
2091 llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule & CGM) const2092 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2093 unsigned Sig;
2094 if (getABIInfo().has64BitPointers())
2095 Sig = (0xeb << 0) | // jmp rel8
2096 (0x0a << 8) | // .+0x0c
2097 ('F' << 16) |
2098 ('T' << 24);
2099 else
2100 Sig = (0xeb << 0) | // jmp rel8
2101 (0x06 << 8) | // .+0x08
2102 ('F' << 16) |
2103 ('T' << 24);
2104 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2105 }
2106
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const2107 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2108 CodeGen::CodeGenModule &CGM) const override {
2109 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2110 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2111 llvm::Function *Fn = cast<llvm::Function>(GV);
2112 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2113 }
2114 }
2115 }
2116 };
2117
2118 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
2119 public:
PS4TargetCodeGenInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)2120 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2121 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2122
getDependentLibraryOption(llvm::StringRef Lib,llvm::SmallString<24> & Opt) const2123 void getDependentLibraryOption(llvm::StringRef Lib,
2124 llvm::SmallString<24> &Opt) const override {
2125 Opt = "\01";
2126 // If the argument contains a space, enclose it in quotes.
2127 if (Lib.find(" ") != StringRef::npos)
2128 Opt += "\"" + Lib.str() + "\"";
2129 else
2130 Opt += Lib;
2131 }
2132 };
2133
qualifyWindowsLibrary(llvm::StringRef Lib)2134 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2135 // If the argument does not end in .lib, automatically add the suffix.
2136 // If the argument contains a space, enclose it in quotes.
2137 // This matches the behavior of MSVC.
2138 bool Quote = (Lib.find(" ") != StringRef::npos);
2139 std::string ArgStr = Quote ? "\"" : "";
2140 ArgStr += Lib;
2141 if (!Lib.endswith_lower(".lib"))
2142 ArgStr += ".lib";
2143 ArgStr += Quote ? "\"" : "";
2144 return ArgStr;
2145 }
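// Example results (illustrative): "msvcrt" becomes "msvcrt.lib", while
// "my lib.Lib" already ends in .lib and only gains quotes because of the
// space: "\"my lib.Lib\"".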
2146
2147 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2148 public:
WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes & CGT,bool DarwinVectorABI,bool RetSmallStructInRegABI,bool Win32StructABI,unsigned NumRegisterParameters)2149 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2150 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2151 unsigned NumRegisterParameters)
2152 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2153 Win32StructABI, NumRegisterParameters, false) {}
2154
2155 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2156 CodeGen::CodeGenModule &CGM) const override;
2157
getDependentLibraryOption(llvm::StringRef Lib,llvm::SmallString<24> & Opt) const2158 void getDependentLibraryOption(llvm::StringRef Lib,
2159 llvm::SmallString<24> &Opt) const override {
2160 Opt = "/DEFAULTLIB:";
2161 Opt += qualifyWindowsLibrary(Lib);
2162 }
2163
getDetectMismatchOption(llvm::StringRef Name,llvm::StringRef Value,llvm::SmallString<32> & Opt) const2164 void getDetectMismatchOption(llvm::StringRef Name,
2165 llvm::StringRef Value,
2166 llvm::SmallString<32> &Opt) const override {
2167 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2168 }
2169 };
2170
addStackProbeSizeTargetAttribute(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM)2171 static void addStackProbeSizeTargetAttribute(const Decl *D,
2172 llvm::GlobalValue *GV,
2173 CodeGen::CodeGenModule &CGM) {
2174 if (D && isa<FunctionDecl>(D)) {
2175 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
2176 llvm::Function *Fn = cast<llvm::Function>(GV);
2177
2178 Fn->addFnAttr("stack-probe-size",
2179 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2180 }
2181 }
2182 }
2183
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const2184 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2185 llvm::GlobalValue *GV,
2186 CodeGen::CodeGenModule &CGM) const {
2187 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2188
2189 addStackProbeSizeTargetAttribute(D, GV, CGM);
2190 }
2191
2192 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2193 public:
WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes & CGT,X86AVXABILevel AVXLevel)2194 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2195 X86AVXABILevel AVXLevel)
2196 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
2197
2198 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2199 CodeGen::CodeGenModule &CGM) const override;
2200
getDwarfEHStackPointer(CodeGen::CodeGenModule & CGM) const2201 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2202 return 7;
2203 }
2204
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address) const2205 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2206 llvm::Value *Address) const override {
2207 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2208
2209 // 0-15 are the 16 integer registers.
2210 // 16 is %rip.
2211 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2212 return false;
2213 }
2214
getDependentLibraryOption(llvm::StringRef Lib,llvm::SmallString<24> & Opt) const2215 void getDependentLibraryOption(llvm::StringRef Lib,
2216 llvm::SmallString<24> &Opt) const override {
2217 Opt = "/DEFAULTLIB:";
2218 Opt += qualifyWindowsLibrary(Lib);
2219 }
2220
getDetectMismatchOption(llvm::StringRef Name,llvm::StringRef Value,llvm::SmallString<32> & Opt) const2221 void getDetectMismatchOption(llvm::StringRef Name,
2222 llvm::StringRef Value,
2223 llvm::SmallString<32> &Opt) const override {
2224 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2225 }
2226 };
2227
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & CGM) const2228 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2229 llvm::GlobalValue *GV,
2230 CodeGen::CodeGenModule &CGM) const {
2231 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2232
2233 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2234 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2235 llvm::Function *Fn = cast<llvm::Function>(GV);
2236 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2237 }
2238 }
2239
2240 addStackProbeSizeTargetAttribute(D, GV, CGM);
2241 }
2242 }
2243
postMerge(unsigned AggregateSize,Class & Lo,Class & Hi) const2244 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2245 Class &Hi) const {
2246 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2247 //
2248 // (a) If one of the classes is Memory, the whole argument is passed in
2249 // memory.
2250 //
2251 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2252 // memory.
2253 //
2254 // (c) If the size of the aggregate exceeds two eightbytes and the first
2255 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2256 // argument is passed in memory. NOTE: This is necessary to keep the
2257 // ABI working for processors that don't support the __m256 type.
2258 //
2259 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2260 //
2261 // Some of these are enforced by the merging logic. Others can arise
2262 // only with unions; for example:
2263 // union { _Complex double; unsigned; }
2264 //
2265 // Note that clauses (b) and (c) were added in 0.98.
2266 //
2267 if (Hi == Memory)
2268 Lo = Memory;
2269 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2270 Lo = Memory;
2271 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2272 Lo = Memory;
2273 if (Hi == SSEUp && Lo != SSE)
2274 Hi = SSE;
2275 }
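// Worked example (illustrative): a 32-byte struct of four doubles merges to
// Lo = SSE, Hi = SSE; rule (c) above then forces Lo = Memory because the
// aggregate exceeds two eightbytes and Hi is not SSEUp, so the whole struct
// ends up on the stack.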
2276
merge(Class Accum,Class Field)2277 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2278 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2279 // classified recursively so that always two fields are
2280 // considered. The resulting class is calculated according to
2281 // the classes of the fields in the eightbyte:
2282 //
2283 // (a) If both classes are equal, this is the resulting class.
2284 //
2285 // (b) If one of the classes is NO_CLASS, the resulting class is
2286 // the other class.
2287 //
2288 // (c) If one of the classes is MEMORY, the result is the MEMORY
2289 // class.
2290 //
2291 // (d) If one of the classes is INTEGER, the result is the
2292 // INTEGER.
2293 //
2294 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2295 // MEMORY is used as class.
2296 //
2297 // (f) Otherwise class SSE is used.
2298
2299 // Accum should never be memory (we should have returned) or
2300 // ComplexX87 (because this cannot be passed in a structure).
2301 assert((Accum != Memory && Accum != ComplexX87) &&
2302 "Invalid accumulated classification during merge.");
2303 if (Accum == Field || Field == NoClass)
2304 return Accum;
2305 if (Field == Memory)
2306 return Memory;
2307 if (Accum == NoClass)
2308 return Field;
2309 if (Accum == Integer || Field == Integer)
2310 return Integer;
2311 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2312 Accum == X87 || Accum == X87Up)
2313 return Memory;
2314 return SSE;
2315 }
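// For instance (illustrative): merging Integer with SSE yields Integer
// (rule d), merging SSE with NoClass leaves SSE (rule b), and any X87, X87Up
// or ComplexX87 field forces Memory (rule e).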
2316
classify(QualType Ty,uint64_t OffsetBase,Class & Lo,Class & Hi,bool isNamedArg) const2317 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2318 Class &Lo, Class &Hi, bool isNamedArg) const {
2319 // FIXME: This code can be simplified by introducing a simple value class for
2320 // Class pairs with appropriate constructor methods for the various
2321 // situations.
2322
2323 // FIXME: Some of the split computations are wrong; unaligned vectors
2324 // shouldn't be passed in registers for example, so there is no chance they
2325 // can straddle an eightbyte. Verify & simplify.
2326
2327 Lo = Hi = NoClass;
2328
2329 Class &Current = OffsetBase < 64 ? Lo : Hi;
2330 Current = Memory;
2331
2332 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2333 BuiltinType::Kind k = BT->getKind();
2334
2335 if (k == BuiltinType::Void) {
2336 Current = NoClass;
2337 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2338 Lo = Integer;
2339 Hi = Integer;
2340 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2341 Current = Integer;
2342 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2343 Current = SSE;
2344 } else if (k == BuiltinType::LongDouble) {
2345 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2346 if (LDF == &llvm::APFloat::IEEEquad) {
2347 Lo = SSE;
2348 Hi = SSEUp;
2349 } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
2350 Lo = X87;
2351 Hi = X87Up;
2352 } else if (LDF == &llvm::APFloat::IEEEdouble) {
2353 Current = SSE;
2354 } else
2355 llvm_unreachable("unexpected long double representation!");
2356 }
2357 // FIXME: _Decimal32 and _Decimal64 are SSE.
2358 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2359 return;
2360 }
2361
2362 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2363 // Classify the underlying integer type.
2364 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2365 return;
2366 }
2367
2368 if (Ty->hasPointerRepresentation()) {
2369 Current = Integer;
2370 return;
2371 }
2372
2373 if (Ty->isMemberPointerType()) {
2374 if (Ty->isMemberFunctionPointerType()) {
2375 if (Has64BitPointers) {
2376 // If Has64BitPointers, this is an {i64, i64}, so classify both
2377 // Lo and Hi now.
2378 Lo = Hi = Integer;
2379 } else {
2380 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2381 // straddles an eightbyte boundary, Hi should be classified as well.
2382 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2383 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2384 if (EB_FuncPtr != EB_ThisAdj) {
2385 Lo = Hi = Integer;
2386 } else {
2387 Current = Integer;
2388 }
2389 }
2390 } else {
2391 Current = Integer;
2392 }
2393 return;
2394 }
2395
2396 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2397 uint64_t Size = getContext().getTypeSize(VT);
2398 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2399 // gcc passes the following as integer:
2400 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2401 // 2 bytes - <2 x char>, <1 x short>
2402 // 1 byte - <1 x char>
2403 Current = Integer;
2404
2405 // If this type crosses an eightbyte boundary, it should be
2406 // split.
2407 uint64_t EB_Lo = (OffsetBase) / 64;
2408 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2409 if (EB_Lo != EB_Hi)
2410 Hi = Lo;
2411 } else if (Size == 64) {
2412 QualType ElementType = VT->getElementType();
2413
2414 // gcc passes <1 x double> in memory. :(
2415 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2416 return;
2417
2418 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2419 // pass them as integer. For platforms where clang is the de facto
2420 // platform compiler, we must continue to use integer.
2421 if (!classifyIntegerMMXAsSSE() &&
2422 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2423 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2424 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2425 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2426 Current = Integer;
2427 else
2428 Current = SSE;
2429
2430 // If this type crosses an eightbyte boundary, it should be
2431 // split.
2432 if (OffsetBase && OffsetBase != 64)
2433 Hi = Lo;
2434 } else if (Size == 128 ||
2435 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2436 // Arguments of 256 bits are split into four eightbyte chunks. The
2437 // least significant one belongs to class SSE and all the others to class
2438 // SSEUP. The original Lo and Hi design considers that types can't be
2439 // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
2440 // This design isn't correct for 256 bits, but since there are no cases
2441 // where the upper parts would need to be inspected, avoid adding
2442 // complexity and just consider Hi to match the 64-256 part.
2443 //
2444 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2445 // registers if they are "named", i.e. not part of the "..." of a
2446 // variadic function.
2447 //
2448 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2449 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2450 Lo = SSE;
2451 Hi = SSEUp;
2452 }
2453 return;
2454 }
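  // Vector examples (illustrative): <4 x float> is 128 bits and gets Lo = SSE,
  // Hi = SSEUp; a 256-bit vector passed through the '...' of a variadic call is
  // not a named argument, so it keeps the initial Memory classification here.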
2455
2456 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2457 QualType ET = getContext().getCanonicalType(CT->getElementType());
2458
2459 uint64_t Size = getContext().getTypeSize(Ty);
2460 if (ET->isIntegralOrEnumerationType()) {
2461 if (Size <= 64)
2462 Current = Integer;
2463 else if (Size <= 128)
2464 Lo = Hi = Integer;
2465 } else if (ET == getContext().FloatTy) {
2466 Current = SSE;
2467 } else if (ET == getContext().DoubleTy) {
2468 Lo = Hi = SSE;
2469 } else if (ET == getContext().LongDoubleTy) {
2470 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2471 if (LDF == &llvm::APFloat::IEEEquad)
2472 Current = Memory;
2473 else if (LDF == &llvm::APFloat::x87DoubleExtended)
2474 Current = ComplexX87;
2475 else if (LDF == &llvm::APFloat::IEEEdouble)
2476 Lo = Hi = SSE;
2477 else
2478 llvm_unreachable("unexpected long double representation!");
2479 }
2480
2481 // If this complex type crosses an eightbyte boundary then it
2482 // should be split.
2483 uint64_t EB_Real = (OffsetBase) / 64;
2484 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2485 if (Hi == NoClass && EB_Real != EB_Imag)
2486 Hi = Lo;
2487
2488 return;
2489 }
2490
2491 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2492 // Arrays are treated like structures.
2493
2494 uint64_t Size = getContext().getTypeSize(Ty);
2495
2496 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2497 // than four eightbytes, ..., it has class MEMORY.
2498 if (Size > 256)
2499 return;
2500
2501 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2502 // fields, it has class MEMORY.
2503 //
2504 // Only need to check alignment of array base.
2505 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2506 return;
2507
2508 // Otherwise implement simplified merge. We could be smarter about
2509 // this, but it isn't worth it and would be harder to verify.
2510 Current = NoClass;
2511 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2512 uint64_t ArraySize = AT->getSize().getZExtValue();
2513
2514 // The only case a 256-bit wide vector could be used is when the array
2515 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2516 // to work for sizes wider than 128, early check and fallback to memory.
2517 if (Size > 128 && EltSize != 256)
2518 return;
2519
2520 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2521 Class FieldLo, FieldHi;
2522 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2523 Lo = merge(Lo, FieldLo);
2524 Hi = merge(Hi, FieldHi);
2525 if (Lo == Memory || Hi == Memory)
2526 break;
2527 }
2528
2529 postMerge(Size, Lo, Hi);
2530 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2531 return;
2532 }
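  // Array example (illustrative): a struct member 'double d[2]' contributes
  // Lo = SSE and Hi = SSE, so the two eightbytes can travel in two SSE
  // registers when enough of them are free.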
2533
2534 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2535 uint64_t Size = getContext().getTypeSize(Ty);
2536
2537 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2538 // than four eightbytes, ..., it has class MEMORY.
2539 if (Size > 256)
2540 return;
2541
2542 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2543 // copy constructor or a non-trivial destructor, it is passed by invisible
2544 // reference.
2545 if (getRecordArgABI(RT, getCXXABI()))
2546 return;
2547
2548 const RecordDecl *RD = RT->getDecl();
2549
2550 // Assume variable sized types are passed in memory.
2551 if (RD->hasFlexibleArrayMember())
2552 return;
2553
2554 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2555
2556 // Reset Lo class, this will be recomputed.
2557 Current = NoClass;
2558
2559 // If this is a C++ record, classify the bases first.
2560 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2561 for (const auto &I : CXXRD->bases()) {
2562 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2563 "Unexpected base class!");
2564 const CXXRecordDecl *Base =
2565 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2566
2567 // Classify this field.
2568 //
2569 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2570 // single eightbyte, each is classified separately. Each eightbyte gets
2571 // initialized to class NO_CLASS.
2572 Class FieldLo, FieldHi;
2573 uint64_t Offset =
2574 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2575 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2576 Lo = merge(Lo, FieldLo);
2577 Hi = merge(Hi, FieldHi);
2578 if (Lo == Memory || Hi == Memory) {
2579 postMerge(Size, Lo, Hi);
2580 return;
2581 }
2582 }
2583 }
2584
2585 // Classify the fields one at a time, merging the results.
2586 unsigned idx = 0;
2587 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2588 i != e; ++i, ++idx) {
2589 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2590 bool BitField = i->isBitField();
2591
2592 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2593 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2594 //
2595 // The only case a 256-bit wide vector could be used is when the struct
2596 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2597 // to work for sizes wider than 128, early check and fallback to memory.
2598 //
2599 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
2600 Lo = Memory;
2601 postMerge(Size, Lo, Hi);
2602 return;
2603 }
2604 // Note, skip this test for bit-fields, see below.
2605 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2606 Lo = Memory;
2607 postMerge(Size, Lo, Hi);
2608 return;
2609 }
2610
2611 // Classify this field.
2612 //
2613 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2614 // exceeds a single eightbyte, each is classified
2615 // separately. Each eightbyte gets initialized to class
2616 // NO_CLASS.
2617 Class FieldLo, FieldHi;
2618
2619 // Bit-fields require special handling, they do not force the
2620 // structure to be passed in memory even if unaligned, and
2621 // therefore they can straddle an eightbyte.
2622 if (BitField) {
2623 // Ignore padding bit-fields.
2624 if (i->isUnnamedBitfield())
2625 continue;
2626
2627 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2628 uint64_t Size = i->getBitWidthValue(getContext());
2629
2630 uint64_t EB_Lo = Offset / 64;
2631 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2632
2633 if (EB_Lo) {
2634 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2635 FieldLo = NoClass;
2636 FieldHi = Integer;
2637 } else {
2638 FieldLo = Integer;
2639 FieldHi = EB_Hi ? Integer : NoClass;
2640 }
2641 } else
2642 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2643 Lo = merge(Lo, FieldLo);
2644 Hi = merge(Hi, FieldHi);
2645 if (Lo == Memory || Hi == Memory)
2646 break;
2647 }
2648
2649 postMerge(Size, Lo, Hi);
2650 }
2651 }
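// Putting it together (illustrative): 'struct { long a; double b; }'
// classifies as Lo = Integer, Hi = SSE, so 'a' is passed in a GPR and 'b' in
// an XMM register; adding an x87 'long double' member instead classifies as
// X87/X87Up and the merge forces the whole struct into the Memory class.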
2652
getIndirectReturnResult(QualType Ty) const2653 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2654 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2655 // place naturally.
2656 if (!isAggregateTypeForABI(Ty)) {
2657 // Treat an enum type as its underlying type.
2658 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2659 Ty = EnumTy->getDecl()->getIntegerType();
2660
2661 return (Ty->isPromotableIntegerType() ?
2662 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2663 }
2664
2665 return getNaturalAlignIndirect(Ty);
2666 }
2667
IsIllegalVectorType(QualType Ty) const2668 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2669 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2670 uint64_t Size = getContext().getTypeSize(VecTy);
2671 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2672 if (Size <= 64 || Size > LargestVector)
2673 return true;
2674 }
2675
2676 return false;
2677 }
2678
getIndirectResult(QualType Ty,unsigned freeIntRegs) const2679 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2680 unsigned freeIntRegs) const {
2681 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2682 // place naturally.
2683 //
2684 // This assumption is optimistic, as there could be free registers available
2685 // when we need to pass this argument in memory, and LLVM could try to pass
2686 // the argument in the free register. This does not seem to happen currently,
2687 // but this code would be much safer if we could mark the argument with
2688 // 'onstack'. See PR12193.
2689 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2690 // Treat an enum type as its underlying type.
2691 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2692 Ty = EnumTy->getDecl()->getIntegerType();
2693
2694 return (Ty->isPromotableIntegerType() ?
2695 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2696 }
2697
2698 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2699 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2700
2701 // Compute the byval alignment. We specify the alignment of the byval in all
2702 // cases so that the mid-level optimizer knows the alignment of the byval.
2703 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2704
2705 // Attempt to avoid passing indirect results using byval when possible. This
2706 // is important for good codegen.
2707 //
2708 // We do this by coercing the value into a scalar type which the backend can
2709 // handle naturally (i.e., without using byval).
2710 //
2711 // For simplicity, we currently only do this when we have exhausted all of the
2712 // free integer registers. Doing this when there are free integer registers
2713 // would require more care, as we would have to ensure that the coerced value
2714 // did not claim the unused register. That would require either reordering the
2715 // arguments to the function (so that any subsequent inreg values came first),
2716 // or only doing this optimization when there were no following arguments that
2717 // might be inreg.
2718 //
2719 // We currently expect it to be rare (particularly in well written code) for
2720 // arguments to be passed on the stack when there are still free integer
2721 // registers available (this would typically imply large structs being passed
2722 // by value), so this seems like a fair tradeoff for now.
2723 //
2724 // We can revisit this if the backend grows support for 'onstack' parameter
2725 // attributes. See PR12193.
2726 if (freeIntRegs == 0) {
2727 uint64_t Size = getContext().getTypeSize(Ty);
2728
2729 // If this type fits in an eightbyte, coerce it into the matching integral
2730 // type, which will end up on the stack (with alignment 8).
2731 if (Align == 8 && Size <= 64)
2732 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2733 Size));
2734 }
2735
2736 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2737 }
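// Example of the final coercion (illustrative): once no integer registers are
// left, an 8-byte struct with 8-byte alignment is passed as a plain i64 that
// simply lands on the stack, whereas a larger struct is still passed
// indirectly with its byval alignment recorded (at least 8).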
2738
2739 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2740 /// register. Pick an LLVM IR type that will be passed as a vector register.
GetByteVectorType(QualType Ty) const2741 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2742 // Wrapper structs/arrays that only contain vectors are passed just like
2743 // vectors; strip them off if present.
2744 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2745 Ty = QualType(InnerTy, 0);
2746
2747 llvm::Type *IRType = CGT.ConvertType(Ty);
2748 if (isa<llvm::VectorType>(IRType) ||
2749 IRType->getTypeID() == llvm::Type::FP128TyID)
2750 return IRType;
2751
2752 // We couldn't find the preferred IR vector type for 'Ty'.
2753 uint64_t Size = getContext().getTypeSize(Ty);
2754 assert((Size == 128 || Size == 256) && "Invalid type found!");
2755
2756 // Return a LLVM IR vector type based on the size of 'Ty'.
2757 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2758 Size / 64);
2759 }
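// For instance (illustrative): a struct wrapping a single __m256 member is
// unwrapped by isSingleElementStruct and passed as the underlying
// <8 x float>, while a 16-byte aggregate with no usable IR vector type falls
// back to <2 x double>.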
2760
2761 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2762 /// is known to either be off the end of the specified type or being in
2763 /// alignment padding. The user type specified is known to be at most 128 bits
2764 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2765 /// classification that put one of the two halves in the INTEGER class.
2766 ///
2767 /// It is conservatively correct to return false.
BitsContainNoUserData(QualType Ty,unsigned StartBit,unsigned EndBit,ASTContext & Context)2768 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2769 unsigned EndBit, ASTContext &Context) {
2770 // If the bytes being queried are off the end of the type, there is no user
2771 // data hiding here. This handles analysis of builtins, vectors and other
2772 // types that don't contain interesting padding.
2773 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2774 if (TySize <= StartBit)
2775 return true;
2776
2777 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2778 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2779 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2780
2781 // Check each element to see if the element overlaps with the queried range.
2782 for (unsigned i = 0; i != NumElts; ++i) {
2783 // If the element is after the span we care about, then we're done.
2784 unsigned EltOffset = i*EltSize;
2785 if (EltOffset >= EndBit) break;
2786
2787 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2788 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2789 EndBit-EltOffset, Context))
2790 return false;
2791 }
2792 // If it overlaps no elements, then it is safe to process as padding.
2793 return true;
2794 }
2795
2796 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2797 const RecordDecl *RD = RT->getDecl();
2798 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2799
2800 // If this is a C++ record, check the bases first.
2801 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2802 for (const auto &I : CXXRD->bases()) {
2803 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2804 "Unexpected base class!");
2805 const CXXRecordDecl *Base =
2806 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2807
2808 // If the base is after the span we care about, ignore it.
2809 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2810 if (BaseOffset >= EndBit) continue;
2811
2812 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2813 if (!BitsContainNoUserData(I.getType(), BaseStart,
2814 EndBit-BaseOffset, Context))
2815 return false;
2816 }
2817 }
2818
2819 // Verify that no field has data that overlaps the region of interest. Yes,
2820 // this could be sped up a lot by being smarter about queried fields;
2821 // however, we're only looking at structs up to 16 bytes, so we don't care
2822 // much.
2823 unsigned idx = 0;
2824 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2825 i != e; ++i, ++idx) {
2826 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2827
2828 // If we found a field after the region we care about, then we're done.
2829 if (FieldOffset >= EndBit) break;
2830
2831 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2832 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2833 Context))
2834 return false;
2835 }
2836
2837 // If nothing in this record overlapped the area of interest, then we're
2838 // clean.
2839 return true;
2840 }
2841
2842 return false;
2843 }
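// Illustrative queries: for 'struct { float a, b, c; }' the range [96, 128)
// lies entirely past the 96-bit type, so this returns true; for
// 'struct { float a, b, c, d; }' the same range overlaps field 'd' and the
// result is false.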
2844
2845 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2846 /// float member at the specified offset. For example, {int,{float}} has a
2847 /// float at offset 4. It is conservatively correct for this routine to return
2848 /// false.
ContainsFloatAtOffset(llvm::Type * IRType,unsigned IROffset,const llvm::DataLayout & TD)2849 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2850 const llvm::DataLayout &TD) {
2851 // Base case if we find a float.
2852 if (IROffset == 0 && IRType->isFloatTy())
2853 return true;
2854
2855 // If this is a struct, recurse into the field at the specified offset.
2856 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2857 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2858 unsigned Elt = SL->getElementContainingOffset(IROffset);
2859 IROffset -= SL->getElementOffset(Elt);
2860 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2861 }
2862
2863 // If this is an array, recurse into the field at the specified offset.
2864 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2865 llvm::Type *EltTy = ATy->getElementType();
2866 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2867 IROffset -= IROffset/EltSize*EltSize;
2868 return ContainsFloatAtOffset(EltTy, IROffset, TD);
2869 }
2870
2871 return false;
2872 }
2873
2874
2875 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2876 /// low 8 bytes of an XMM register, corresponding to the SSE class.
2877 llvm::Type *X86_64ABIInfo::
2878 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2879 QualType SourceTy, unsigned SourceOffset) const {
2880 // The only three choices we have are either double, <2 x float>, or float. We
2881 // pass as float if the last 4 bytes are just padding. This happens for
2882 // structs that contain 3 floats.
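  // For illustration (a hedged sketch, not taken from the ABI document): for a
  // hypothetical 'struct S { float x, y, z; }', the low eightbyte holds x and y
  // and is lowered as <2 x float>, while the high eightbyte holds only z; bits
  // 32-63 of that eightbyte are past the end of the type, so this routine
  // returns plain 'float' for it rather than double.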
2883 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2884 SourceOffset*8+64, getContext()))
2885 return llvm::Type::getFloatTy(getVMContext());
2886
2887 // We want to pass as <2 x float> if the LLVM IR type contains a float at
2888 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
2889 // case.
2890 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2891 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2892 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2893
2894 return llvm::Type::getDoubleTy(getVMContext());
2895 }
2896
2897
2898 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2899 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2900 /// about the high or low part of an up-to-16-byte struct. This routine picks
2901 /// the best LLVM IR type to represent this, which may be i64 or may be anything
2902 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2903 /// etc).
2904 ///
2905 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2906 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2907 /// the 8-byte value references. PrefType may be null.
2908 ///
2909 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2910 /// an offset into this that we're processing (which is always either 0 or 8).
2911 ///
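/// For illustration (an assumed example, not normative): for a hypothetical
/// 'struct { char *p; short s; }', the eightbyte at offset 0 is returned as
/// the pointer type itself, while the eightbyte at offset 8 can be returned as
/// i16 because everything after the short is tail padding.
///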
2912 llvm::Type *X86_64ABIInfo::
2913 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2914 QualType SourceTy, unsigned SourceOffset) const {
2915 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2916 // returning an 8-byte unit starting with it. See if we can safely use it.
2917 if (IROffset == 0) {
2918 // Pointers and int64's always fill the 8-byte unit.
2919 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2920 IRType->isIntegerTy(64))
2921 return IRType;
2922
2923 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2924 // goodness in the source type is just tail padding. This is allowed to
2925 // kick in for struct {double,int} on the int, but not on
2926 // struct{double,int,int} because we wouldn't return the second int. We
2927 // have to do this analysis on the source type because we can't depend on
2928 // unions being lowered a specific way etc.
2929 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2930 IRType->isIntegerTy(32) ||
2931 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2932 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2933 cast<llvm::IntegerType>(IRType)->getBitWidth();
2934
2935 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2936 SourceOffset*8+64, getContext()))
2937 return IRType;
2938 }
2939 }
2940
2941 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2942 // If this is a struct, recurse into the field at the specified offset.
2943 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2944 if (IROffset < SL->getSizeInBytes()) {
2945 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2946 IROffset -= SL->getElementOffset(FieldIdx);
2947
2948 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2949 SourceTy, SourceOffset);
2950 }
2951 }
2952
2953 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2954 llvm::Type *EltTy = ATy->getElementType();
2955 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2956 unsigned EltOffset = IROffset/EltSize*EltSize;
2957 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2958 SourceOffset);
2959 }
2960
2961 // Okay, we don't have any better idea of what to pass, so we pass this in an
2962 // integer register that isn't too big to fit the rest of the struct.
2963 unsigned TySizeInBytes =
2964 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2965
2966 assert(TySizeInBytes != SourceOffset && "Empty field?");
2967
2968 // It is always safe to classify this as an integer type up to i64 that
2969 // isn't larger than the structure.
2970 return llvm::IntegerType::get(getVMContext(),
2971 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2972 }
2973
2974
2975 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2976 /// be used as elements of a two register pair to pass or return, return a
2977 /// first class aggregate to represent them. For example, if the low part of
2978 /// a by-value argument should be passed as i32* and the high part as float,
2979 /// return {i32*, float}.
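///
/// For illustration of the promotion case below (mechanics only; constructing
/// a source type that actually hits it is uncommon): if the low part were
/// inferred as a 4-byte 'float' and the high part as another 4-byte type, the
/// naive pair would place the second element at offset 4, so the low part is
/// widened to double (or i64), restoring the required 8-byte offset.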
2980 static llvm::Type *
2981 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2982 const llvm::DataLayout &TD) {
2983 // In order to correctly satisfy the ABI, we need the high part to start
2984 // at offset 8. If the high and low parts we inferred are both 4-byte types
2985 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2986 // the second element at offset 8. Check for this:
2987 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2988 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2989 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2990 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2991
2992 // To handle this, we have to increase the size of the low part so that the
2993 // second element will start at an 8 byte offset. We can't increase the size
2994 // of the second element because it might make us access off the end of the
2995 // struct.
2996 if (HiStart != 8) {
2997 // There are usually two sorts of types the ABI generation code can produce
2998 // for the low part of a pair that aren't 8 bytes in size: float or
2999 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3000 // NaCl).
3001 // Promote these to a larger type.
3002 if (Lo->isFloatTy())
3003 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3004 else {
3005 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3006 && "Invalid/unknown lo type");
3007 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3008 }
3009 }
3010
3011 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
3012
3013
3014 // Verify that the second element is at an 8-byte offset.
3015 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3016 "Invalid x86-64 argument pair!");
3017 return Result;
3018 }
3019
3020 ABIArgInfo X86_64ABIInfo::
3021 classifyReturnType(QualType RetTy) const {
3022 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3023 // classification algorithm.
3024 X86_64ABIInfo::Class Lo, Hi;
3025 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3026
3027 // Check some invariants.
3028 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3029 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
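  // For illustration (a sketch, not exhaustive): a hypothetical
  // 'struct { double d; int i; }' classifies as Lo = SSE, Hi = Integer and is
  // coerced below to {double, i32}, returned in %xmm0 and %eax.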
3030
3031 llvm::Type *ResType = nullptr;
3032 switch (Lo) {
3033 case NoClass:
3034 if (Hi == NoClass)
3035 return ABIArgInfo::getIgnore();
3036 // If the low part is just padding, it takes no register; leave ResType
3037 // null.
3038 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3039 "Unknown missing lo part");
3040 break;
3041
3042 case SSEUp:
3043 case X87Up:
3044 llvm_unreachable("Invalid classification for lo word.");
3045
3046 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3047 // hidden argument.
3048 case Memory:
3049 return getIndirectReturnResult(RetTy);
3050
3051 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3052 // available register of the sequence %rax, %rdx is used.
3053 case Integer:
3054 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3055
3056 // If we have a sign or zero extended integer, make sure to return Extend
3057 // so that the parameter gets the right LLVM IR attributes.
3058 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3059 // Treat an enum type as its underlying type.
3060 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3061 RetTy = EnumTy->getDecl()->getIntegerType();
3062
3063 if (RetTy->isIntegralOrEnumerationType() &&
3064 RetTy->isPromotableIntegerType())
3065 return ABIArgInfo::getExtend();
3066 }
3067 break;
3068
3069 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3070 // available SSE register of the sequence %xmm0, %xmm1 is used.
3071 case SSE:
3072 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3073 break;
3074
3075 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3076 // returned on the X87 stack in %st0 as 80-bit x87 number.
3077 case X87:
3078 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3079 break;
3080
3081 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3082 // part of the value is returned in %st0 and the imaginary part in
3083 // %st1.
3084 case ComplexX87:
3085 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3086 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3087 llvm::Type::getX86_FP80Ty(getVMContext()),
3088 nullptr);
3089 break;
3090 }
3091
3092 llvm::Type *HighPart = nullptr;
3093 switch (Hi) {
3094 // Memory was handled previously and X87 should
3095 // never occur as a hi class.
3096 case Memory:
3097 case X87:
3098 llvm_unreachable("Invalid classification for hi word.");
3099
3100 case ComplexX87: // Previously handled.
3101 case NoClass:
3102 break;
3103
3104 case Integer:
3105 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3106 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3107 return ABIArgInfo::getDirect(HighPart, 8);
3108 break;
3109 case SSE:
3110 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3111 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3112 return ABIArgInfo::getDirect(HighPart, 8);
3113 break;
3114
3115 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3116 // is passed in the next available eightbyte chunk of the last used
3117 // vector register.
3118 //
3119 // SSEUP should always be preceded by SSE, just widen.
3120 case SSEUp:
3121 assert(Lo == SSE && "Unexpected SSEUp classification.");
3122 ResType = GetByteVectorType(RetTy);
3123 break;
3124
3125 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3126 // returned together with the previous X87 value in %st0.
3127 case X87Up:
3128 // If X87Up is preceded by X87, we don't need to do
3129 // anything. However, in some cases with unions it may not be
3130 // preceded by X87. In such situations we follow gcc and pass the
3131 // extra bits in an SSE reg.
3132 if (Lo != X87) {
3133 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3134 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3135 return ABIArgInfo::getDirect(HighPart, 8);
3136 }
3137 break;
3138 }
3139
3140 // If a high part was specified, merge it together with the low part. It is
3141 // known to pass in the high eightbyte of the result. We do this by forming a
3142 // first class struct aggregate with the high and low part: {low, high}
3143 if (HighPart)
3144 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3145
3146 return ABIArgInfo::getDirect(ResType);
3147 }
3148
3149 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3150 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3151 bool isNamedArg)
3152 const
3153 {
3154 Ty = useFirstFieldIfTransparentUnion(Ty);
3155
3156 X86_64ABIInfo::Class Lo, Hi;
3157 classify(Ty, 0, Lo, Hi, isNamedArg);
3158
3159 // Check some invariants.
3160 // FIXME: Enforce these by construction.
3161 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3162 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3163
3164 neededInt = 0;
3165 neededSSE = 0;
3166 llvm::Type *ResType = nullptr;
3167 switch (Lo) {
3168 case NoClass:
3169 if (Hi == NoClass)
3170 return ABIArgInfo::getIgnore();
3171 // If the low part is just padding, it takes no register; leave ResType
3172 // null.
3173 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3174 "Unknown missing lo part");
3175 break;
3176
3177 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3178 // on the stack.
3179 case Memory:
3180
3181 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3182 // COMPLEX_X87, it is passed in memory.
3183 case X87:
3184 case ComplexX87:
3185 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3186 ++neededInt;
3187 return getIndirectResult(Ty, freeIntRegs);
3188
3189 case SSEUp:
3190 case X87Up:
3191 llvm_unreachable("Invalid classification for lo word.");
3192
3193 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3194 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3195 // and %r9 is used.
3196 case Integer:
3197 ++neededInt;
3198
3199 // Pick an 8-byte type based on the preferred type.
3200 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3201
3202 // If we have a sign or zero extended integer, make sure to return Extend
3203 // so that the parameter gets the right LLVM IR attributes.
3204 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3205 // Treat an enum type as its underlying type.
3206 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3207 Ty = EnumTy->getDecl()->getIntegerType();
3208
3209 if (Ty->isIntegralOrEnumerationType() &&
3210 Ty->isPromotableIntegerType())
3211 return ABIArgInfo::getExtend();
3212 }
3213
3214 break;
3215
3216 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3217 // available SSE register is used, the registers are taken in the
3218 // order from %xmm0 to %xmm7.
3219 case SSE: {
3220 llvm::Type *IRType = CGT.ConvertType(Ty);
3221 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3222 ++neededSSE;
3223 break;
3224 }
3225 }
3226
3227 llvm::Type *HighPart = nullptr;
3228 switch (Hi) {
3229 // Memory was handled previously, ComplexX87 and X87 should
3230 // never occur as hi classes, and X87Up must be preceded by X87,
3231 // which is passed in memory.
3232 case Memory:
3233 case X87:
3234 case ComplexX87:
3235 llvm_unreachable("Invalid classification for hi word.");
3236
3237 case NoClass: break;
3238
3239 case Integer:
3240 ++neededInt;
3241 // Pick an 8-byte type based on the preferred type.
3242 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3243
3244 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3245 return ABIArgInfo::getDirect(HighPart, 8);
3246 break;
3247
3248 // X87Up generally doesn't occur here (long double is passed in
3249 // memory), except in situations involving unions.
3250 case X87Up:
3251 case SSE:
3252 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3253
3254 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3255 return ABIArgInfo::getDirect(HighPart, 8);
3256
3257 ++neededSSE;
3258 break;
3259
3260 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3261 // eightbyte is passed in the upper half of the last used SSE
3262 // register. This only happens when 128-bit vectors are passed.
3263 case SSEUp:
3264 assert(Lo == SSE && "Unexpected SSEUp classification");
3265 ResType = GetByteVectorType(Ty);
3266 break;
3267 }
3268
3269 // If a high part was specified, merge it together with the low part. It is
3270 // known to pass in the high eightbyte of the result. We do this by forming a
3271 // first class struct aggregate with the high and low part: {low, high}
3272 if (HighPart)
3273 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3274
3275 return ABIArgInfo::getDirect(ResType);
3276 }
3277
3278 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3279
3280 if (!getCXXABI().classifyReturnType(FI))
3281 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3282
3283 // Keep track of the number of assigned registers.
3284 unsigned freeIntRegs = 6, freeSSERegs = 8;
3285
3286 // If the return value is indirect, then the hidden argument is consuming one
3287 // integer register.
3288 if (FI.getReturnInfo().isIndirect())
3289 --freeIntRegs;
3290
3291 // The chain argument effectively gives us another free register.
3292 if (FI.isChainCall())
3293 ++freeIntRegs;
3294
3295 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3296 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3297 // get assigned (in left-to-right order) for passing as follows...
3298 unsigned ArgNo = 0;
3299 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3300 it != ie; ++it, ++ArgNo) {
3301 bool IsNamedArg = ArgNo < NumRequiredArgs;
3302
3303 unsigned neededInt, neededSSE;
3304 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
3305 neededSSE, IsNamedArg);
3306
3307 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3308 // eightbyte of an argument, the whole argument is passed on the
3309 // stack. If registers have already been assigned for some
3310 // eightbytes of such an argument, the assignments get reverted.
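  // For illustration (hypothetical numbers): if only one integer register
  // remains free and an argument needs two (e.g. a 16-byte INTEGER-classified
  // struct), the whole argument is passed indirectly below and the remaining
  // register stays available for later arguments.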
3311 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
3312 freeIntRegs -= neededInt;
3313 freeSSERegs -= neededSSE;
3314 } else {
3315 it->info = getIndirectResult(it->type, freeIntRegs);
3316 }
3317 }
3318 }
3319
3320 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3321 Address VAListAddr, QualType Ty) {
3322 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3323 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3324 llvm::Value *overflow_arg_area =
3325 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3326
3327 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3328 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3329 // It isn't stated explicitly in the standard, but in practice we use
3330 // alignment greater than 16 where necessary.
3331 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3332 if (Align > CharUnits::fromQuantity(8)) {
3333 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3334 Align);
3335 }
3336
3337 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3338 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3339 llvm::Value *Res =
3340 CGF.Builder.CreateBitCast(overflow_arg_area,
3341 llvm::PointerType::getUnqual(LTy));
3342
3343 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3344 // l->overflow_arg_area + sizeof(type).
3345 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3346 // an 8 byte boundary.
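  // For illustration (assumed example): a 12-byte type would advance
  // overflow_arg_area by 16 bytes here, since the size is rounded up to the
  // next multiple of 8.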
3347
3348 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3349 llvm::Value *Offset =
3350 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3351 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3352 "overflow_arg_area.next");
3353 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3354
3355 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3356 return Address(Res, Align);
3357 }
3358
3359 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3360 QualType Ty) const {
3361 // Assume that va_list type is correct; should be pointer to LLVM type:
3362 // struct {
3363 // i32 gp_offset;
3364 // i32 fp_offset;
3365 // i8* overflow_arg_area;
3366 // i8* reg_save_area;
3367 // };
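  // For reference (a summary of the SysV ABI register save area, not new
  // behaviour): gp_offset indexes the first 48 bytes of reg_save_area
  // (6 GPRs * 8 bytes), and fp_offset starts at 48 and indexes the following
  // 128 bytes (8 XMM registers * 16 bytes), 176 bytes in total.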
3368 unsigned neededInt, neededSSE;
3369
3370 Ty = getContext().getCanonicalType(Ty);
3371 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3372 /*isNamedArg*/false);
3373
3374 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3375 // in the registers. If not go to step 7.
3376 if (!neededInt && !neededSSE)
3377 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3378
3379 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3380 // general purpose registers needed to pass type and num_fp to hold
3381 // the number of floating point registers needed.
3382
3383 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3384 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3385 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3386 //
3387 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3388 // register save space.
3389
3390 llvm::Value *InRegs = nullptr;
3391 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3392 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3393 if (neededInt) {
3394 gp_offset_p =
3395 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3396 "gp_offset_p");
3397 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3398 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3399 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3400 }
3401
3402 if (neededSSE) {
3403 fp_offset_p =
3404 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3405 "fp_offset_p");
3406 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3407 llvm::Value *FitsInFP =
3408 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3409 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3410 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3411 }
3412
3413 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3414 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3415 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3416 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3417
3418 // Emit code to load the value if it was passed in registers.
3419
3420 CGF.EmitBlock(InRegBlock);
3421
3422 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3423 // an offset of l->gp_offset and/or l->fp_offset. This may require
3424 // copying to a temporary location in case the parameter is passed
3425 // in different register classes or requires an alignment greater
3426 // than 8 for general purpose registers and 16 for XMM registers.
3427 //
3428 // FIXME: This really results in shameful code when we end up needing to
3429 // collect arguments from different places; often what should result in a
3430 // simple assembling of a structure from scattered addresses has many more
3431 // loads than necessary. Can we clean this up?
3432 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3433 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3434 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3435 "reg_save_area");
3436
3437 Address RegAddr = Address::invalid();
3438 if (neededInt && neededSSE) {
3439 // FIXME: Cleanup.
3440 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3441 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3442 Address Tmp = CGF.CreateMemTemp(Ty);
3443 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3444 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3445 llvm::Type *TyLo = ST->getElementType(0);
3446 llvm::Type *TyHi = ST->getElementType(1);
3447 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3448 "Unexpected ABI info for mixed regs");
3449 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3450 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3451 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3452 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3453 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3454 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3455
3456 // Copy the first element.
3457 llvm::Value *V =
3458 CGF.Builder.CreateDefaultAlignedLoad(
3459 CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
3460 CGF.Builder.CreateStore(V,
3461 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3462
3463 // Copy the second element.
3464 V = CGF.Builder.CreateDefaultAlignedLoad(
3465 CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
3466 CharUnits Offset = CharUnits::fromQuantity(
3467 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3468 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3469
3470 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3471 } else if (neededInt) {
3472 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3473 CharUnits::fromQuantity(8));
3474 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3475
3476 // Copy to a temporary if necessary to ensure the appropriate alignment.
3477 std::pair<CharUnits, CharUnits> SizeAlign =
3478 getContext().getTypeInfoInChars(Ty);
3479 uint64_t TySize = SizeAlign.first.getQuantity();
3480 CharUnits TyAlign = SizeAlign.second;
3481
3482 // Copy into a temporary if the type is more aligned than the
3483 // register save area.
3484 if (TyAlign.getQuantity() > 8) {
3485 Address Tmp = CGF.CreateMemTemp(Ty);
3486 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3487 RegAddr = Tmp;
3488 }
3489
3490 } else if (neededSSE == 1) {
3491 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3492 CharUnits::fromQuantity(16));
3493 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3494 } else {
3495 assert(neededSSE == 2 && "Invalid number of needed registers!");
3496 // SSE registers are spaced 16 bytes apart in the register save
3497 // area, we need to collect the two eightbytes together.
3498 // The ABI isn't explicit about this, but it seems reasonable
3499 // to assume that the slots are 16-byte aligned, since the stack is
3500 // naturally 16-byte aligned and the prologue is expected to store
3501 // all the SSE registers to the RSA.
3502 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3503 CharUnits::fromQuantity(16));
3504 Address RegAddrHi =
3505 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3506 CharUnits::fromQuantity(16));
3507 llvm::Type *DoubleTy = CGF.DoubleTy;
3508 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3509 llvm::Value *V;
3510 Address Tmp = CGF.CreateMemTemp(Ty);
3511 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3512 V = CGF.Builder.CreateLoad(
3513 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3514 CGF.Builder.CreateStore(V,
3515 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3516 V = CGF.Builder.CreateLoad(
3517 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3518 CGF.Builder.CreateStore(V,
3519 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3520
3521 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3522 }
3523
3524 // AMD64-ABI 3.5.7p5: Step 5. Set:
3525 // l->gp_offset = l->gp_offset + num_gp * 8
3526 // l->fp_offset = l->fp_offset + num_fp * 16.
3527 if (neededInt) {
3528 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3529 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3530 gp_offset_p);
3531 }
3532 if (neededSSE) {
3533 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3534 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3535 fp_offset_p);
3536 }
3537 CGF.EmitBranch(ContBlock);
3538
3539 // Emit code to load the value if it was passed in memory.
3540
3541 CGF.EmitBlock(InMemBlock);
3542 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3543
3544 // Return the appropriate result.
3545
3546 CGF.EmitBlock(ContBlock);
3547 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3548 "vaarg.addr");
3549 return ResAddr;
3550 }
3551
3552 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3553 QualType Ty) const {
3554 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3555 CGF.getContext().getTypeInfoInChars(Ty),
3556 CharUnits::fromQuantity(8),
3557 /*allowHigherAlign*/ false);
3558 }
3559
3560 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3561 bool IsReturnType) const {
3562
3563 if (Ty->isVoidType())
3564 return ABIArgInfo::getIgnore();
3565
3566 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3567 Ty = EnumTy->getDecl()->getIntegerType();
3568
3569 TypeInfo Info = getContext().getTypeInfo(Ty);
3570 uint64_t Width = Info.Width;
3571 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3572
3573 const RecordType *RT = Ty->getAs<RecordType>();
3574 if (RT) {
3575 if (!IsReturnType) {
3576 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3577 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3578 }
3579
3580 if (RT->getDecl()->hasFlexibleArrayMember())
3581 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3582
3583 }
3584
3585 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3586 // other targets.
3587 const Type *Base = nullptr;
3588 uint64_t NumElts = 0;
3589 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3590 if (FreeSSERegs >= NumElts) {
3591 FreeSSERegs -= NumElts;
3592 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3593 return ABIArgInfo::getDirect();
3594 return ABIArgInfo::getExpand();
3595 }
3596 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3597 }
3598
3599
3600 if (Ty->isMemberPointerType()) {
3601 // If the member pointer is represented by an LLVM int or ptr, pass it
3602 // directly.
3603 llvm::Type *LLTy = CGT.ConvertType(Ty);
3604 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3605 return ABIArgInfo::getDirect();
3606 }
3607
3608 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3609 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3610 // not 1, 2, 4, or 8 bytes, must be passed by reference."
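    // For illustration (hypothetical cases): a 12-byte struct is not a
    // power-of-2 size of at most 8 bytes and is therefore passed by reference,
    // while an 8-byte struct falls through and is coerced to i64 below.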
3611 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3612 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3613
3614 // Otherwise, coerce it to a small integer.
3615 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3616 }
3617
3618 // The bool type is always extended per the ABI; other builtin types are
3619 // not extended.
3620 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3621 if (BT && BT->getKind() == BuiltinType::Bool)
3622 return ABIArgInfo::getExtend();
3623
3624 // Mingw64 GCC uses the old 80-bit extended precision floating point
3625 // unit and passes such long doubles indirectly through memory.
3626 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3627 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3628 if (LDF == &llvm::APFloat::x87DoubleExtended)
3629 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3630 }
3631
3632 return ABIArgInfo::getDirect();
3633 }
3634
3635 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3636 bool IsVectorCall =
3637 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3638
3639 // We can use up to 4 SSE return registers with vectorcall.
3640 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3641 if (!getCXXABI().classifyReturnType(FI))
3642 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3643
3644 // We can use up to 6 SSE register parameters with vectorcall.
3645 FreeSSERegs = IsVectorCall ? 6 : 0;
3646 for (auto &I : FI.arguments())
3647 I.info = classify(I.type, FreeSSERegs, false);
3648 }
3649
3650 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3651 QualType Ty) const {
3652 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3653 CGF.getContext().getTypeInfoInChars(Ty),
3654 CharUnits::fromQuantity(8),
3655 /*allowHigherAlign*/ false);
3656 }
3657
3658 // PowerPC-32
3659 namespace {
3660 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3661 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3662 bool IsSoftFloatABI;
3663 public:
3664   PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3665 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3666
3667 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3668 QualType Ty) const override;
3669 };
3670
3671 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3672 public:
3673   PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
3674 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
3675
3676   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3677 // This is recovered from gcc output.
3678 return 1; // r1 is the dedicated stack pointer
3679 }
3680
3681 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3682 llvm::Value *Address) const override;
3683 };
3684
3685 }
3686
3687 // TODO: this implementation is now likely redundant with
3688 // DefaultABIInfo::EmitVAArg.
3689 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
3690 QualType Ty) const {
3691 const unsigned OverflowLimit = 8;
3692 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3693 // TODO: Implement this. For now ignore.
3694 (void)CTy;
3695 return Address::invalid(); // FIXME?
3696 }
3697
3698 // struct __va_list_tag {
3699 // unsigned char gpr;
3700 // unsigned char fpr;
3701 // unsigned short reserved;
3702 // void *overflow_arg_area;
3703 // void *reg_save_area;
3704 // };
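  // For reference (a sketch of the SVR4 convention, assumed rather than quoted
  // from the ABI document): 'gpr' counts how many of the 8 parameter GPRs
  // (r3-r10) have been consumed and 'fpr' counts the 8 parameter FPRs (f1-f8);
  // once a counter reaches OverflowLimit (8), the argument is taken from the
  // overflow area instead.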
3705
3706 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
3707 bool isInt =
3708 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3709 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
3710
3711 // All aggregates are passed indirectly? That doesn't seem consistent
3712 // with the argument-lowering code.
3713 bool isIndirect = Ty->isAggregateType();
3714
3715 CGBuilderTy &Builder = CGF.Builder;
3716
3717 // The calling convention either uses 1-2 GPRs or 1 FPR.
3718 Address NumRegsAddr = Address::invalid();
3719 if (isInt || IsSoftFloatABI) {
3720 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
3721 } else {
3722 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
3723 }
3724
3725 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
3726
3727 // "Align" the register count when TY is i64.
3728 if (isI64 || (isF64 && IsSoftFloatABI)) {
3729 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
3730 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
3731 }
3732
3733 llvm::Value *CC =
3734 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
3735
3736 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3737 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3738 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3739
3740 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3741
3742 llvm::Type *DirectTy = CGF.ConvertType(Ty);
3743 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
3744
3745 // Case 1: consume registers.
3746 Address RegAddr = Address::invalid();
3747 {
3748 CGF.EmitBlock(UsingRegs);
3749
3750 Address RegSaveAreaPtr =
3751 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
3752 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
3753 CharUnits::fromQuantity(8));
3754 assert(RegAddr.getElementType() == CGF.Int8Ty);
3755
3756 // Floating-point registers start after the general-purpose registers.
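    // (The 8 general-purpose registers occupy the first 8 * 4 = 32 bytes of
    // the save area, hence the 32-byte offset below.)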
3757 if (!(isInt || IsSoftFloatABI)) {
3758 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
3759 CharUnits::fromQuantity(32));
3760 }
3761
3762 // Get the address of the saved value by scaling the number of
3763 // registers we've used by the size of each register.
3764 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
3765 llvm::Value *RegOffset =
3766 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
3767 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
3768 RegAddr.getPointer(), RegOffset),
3769 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
3770 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
3771
3772 // Increase the used-register count.
3773 NumRegs =
3774 Builder.CreateAdd(NumRegs,
3775 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
3776 Builder.CreateStore(NumRegs, NumRegsAddr);
3777
3778 CGF.EmitBranch(Cont);
3779 }
3780
3781 // Case 2: consume space in the overflow area.
3782 Address MemAddr = Address::invalid();
3783 {
3784 CGF.EmitBlock(UsingOverflow);
3785
3786 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
3787
3788 // Everything in the overflow area is rounded up to a size of at least 4.
3789 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
3790
3791 CharUnits Size;
3792 if (!isIndirect) {
3793 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
3794 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
3795 } else {
3796 Size = CGF.getPointerSize();
3797 }
3798
3799 Address OverflowAreaAddr =
3800 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
3801 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
3802 OverflowAreaAlign);
3803 // Round up address of argument to alignment
3804 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3805 if (Align > OverflowAreaAlign) {
3806 llvm::Value *Ptr = OverflowArea.getPointer();
3807 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
3808 Align);
3809 }
3810
3811 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
3812
3813 // Increase the overflow area.
3814 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
3815 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3816 CGF.EmitBranch(Cont);
3817 }
3818
3819 CGF.EmitBlock(Cont);
3820
3821 // Merge the cases with a phi.
3822 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
3823 "vaarg.addr");
3824
3825 // Load the pointer if the argument was passed indirectly.
3826 if (isIndirect) {
3827 Result = Address(Builder.CreateLoad(Result, "aggr"),
3828 getContext().getTypeAlignInChars(Ty));
3829 }
3830
3831 return Result;
3832 }
3833
3834 bool
3835 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3836 llvm::Value *Address) const {
3837 // This is calculated from the LLVM and GCC tables and verified
3838 // against gcc output. AFAIK all ABIs use the same encoding.
3839
3840 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3841
3842 llvm::IntegerType *i8 = CGF.Int8Ty;
3843 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3844 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3845 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3846
3847 // 0-31: r0-31, the 4-byte general-purpose registers
3848 AssignToArrayRange(Builder, Address, Four8, 0, 31);
3849
3850 // 32-63: fp0-31, the 8-byte floating-point registers
3851 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3852
3853 // 64-76 are various 4-byte special-purpose registers:
3854 // 64: mq
3855 // 65: lr
3856 // 66: ctr
3857 // 67: ap
3858 // 68-75 cr0-7
3859 // 76: xer
3860 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3861
3862 // 77-108: v0-31, the 16-byte vector registers
3863 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3864
3865 // 109: vrsave
3866 // 110: vscr
3867 // 111: spe_acc
3868 // 112: spefscr
3869 // 113: sfp
3870 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3871
3872 return false;
3873 }
3874
3875 // PowerPC-64
3876
3877 namespace {
3878 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
3879 class PPC64_SVR4_ABIInfo : public ABIInfo {
3880 public:
3881 enum ABIKind {
3882 ELFv1 = 0,
3883 ELFv2
3884 };
3885
3886 private:
3887 static const unsigned GPRBits = 64;
3888 ABIKind Kind;
3889 bool HasQPX;
3890
3891 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
3892 // will be passed in a QPX register.
3893   bool IsQPXVectorTy(const Type *Ty) const {
3894 if (!HasQPX)
3895 return false;
3896
3897 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3898 unsigned NumElements = VT->getNumElements();
3899 if (NumElements == 1)
3900 return false;
3901
3902 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
3903 if (getContext().getTypeSize(Ty) <= 256)
3904 return true;
3905 } else if (VT->getElementType()->
3906 isSpecificBuiltinType(BuiltinType::Float)) {
3907 if (getContext().getTypeSize(Ty) <= 128)
3908 return true;
3909 }
3910 }
3911
3912 return false;
3913 }
3914
3915   bool IsQPXVectorTy(QualType Ty) const {
3916 return IsQPXVectorTy(Ty.getTypePtr());
3917 }
3918
3919 public:
3920   PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
3921 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
3922
3923 bool isPromotableTypeForABI(QualType Ty) const;
3924 CharUnits getParamTypeAlignment(QualType Ty) const;
3925
3926 ABIArgInfo classifyReturnType(QualType RetTy) const;
3927 ABIArgInfo classifyArgumentType(QualType Ty) const;
3928
3929 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3930 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3931 uint64_t Members) const override;
3932
3933 // TODO: We can add more logic to computeInfo to improve performance.
3934 // Example: For aggregate arguments that fit in a register, we could
3935 // use getDirectInReg (as is done below for structs containing a single
3936 // floating-point value) to avoid pushing them to memory on function
3937 // entry. This would require changing the logic in PPCISelLowering
3938 // when lowering the parameters in the caller and args in the callee.
3939   void computeInfo(CGFunctionInfo &FI) const override {
3940 if (!getCXXABI().classifyReturnType(FI))
3941 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3942 for (auto &I : FI.arguments()) {
3943 // We rely on the default argument classification for the most part.
3944 // One exception: An aggregate containing a single floating-point
3945 // or vector item must be passed in a register if one is available.
3946 const Type *T = isSingleElementStruct(I.type, getContext());
3947 if (T) {
3948 const BuiltinType *BT = T->getAs<BuiltinType>();
3949 if (IsQPXVectorTy(T) ||
3950 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
3951 (BT && BT->isFloatingPoint())) {
3952 QualType QT(T, 0);
3953 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
3954 continue;
3955 }
3956 }
3957 I.info = classifyArgumentType(I.type);
3958 }
3959 }
3960
3961 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3962 QualType Ty) const override;
3963 };
3964
3965 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
3966
3967 public:
3968   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3969 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
3970 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
3971
3972   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3973 // This is recovered from gcc output.
3974 return 1; // r1 is the dedicated stack pointer
3975 }
3976
3977 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3978 llvm::Value *Address) const override;
3979 };
3980
3981 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3982 public:
3983   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3984
3985   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3986 // This is recovered from gcc output.
3987 return 1; // r1 is the dedicated stack pointer
3988 }
3989
3990 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3991 llvm::Value *Address) const override;
3992 };
3993
3994 }
3995
3996 // Return true if the ABI requires Ty to be passed sign- or zero-
3997 // extended to 64 bits.
3998 bool
3999 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4000 // Treat an enum type as its underlying type.
4001 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4002 Ty = EnumTy->getDecl()->getIntegerType();
4003
4004 // Promotable integer types are required to be promoted by the ABI.
4005 if (Ty->isPromotableIntegerType())
4006 return true;
4007
4008 // In addition to the usual promotable integer types, we also need to
4009 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4010 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4011 switch (BT->getKind()) {
4012 case BuiltinType::Int:
4013 case BuiltinType::UInt:
4014 return true;
4015 default:
4016 break;
4017 }
4018
4019 return false;
4020 }
4021
4022 /// isAlignedParamType - Determine whether a type requires 16-byte or
4023 /// higher alignment in the parameter area. Always returns at least 8.
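/// For illustration (an assumed example): a 16-byte Altivec vector parameter
/// gets 16-byte alignment here, while most scalar parameters get the minimum
/// of 8 bytes.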
4024 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4025 // Complex types are passed just like their elements.
4026 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4027 Ty = CTy->getElementType();
4028
4029 // Only vector types of size 16 bytes need alignment (larger types are
4030 // passed via reference, smaller types are not aligned).
4031 if (IsQPXVectorTy(Ty)) {
4032 if (getContext().getTypeSize(Ty) > 128)
4033 return CharUnits::fromQuantity(32);
4034
4035 return CharUnits::fromQuantity(16);
4036 } else if (Ty->isVectorType()) {
4037 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4038 }
4039
4040 // For single-element float/vector structs, we consider the whole type
4041 // to have the same alignment requirements as its single element.
4042 const Type *AlignAsType = nullptr;
4043 const Type *EltType = isSingleElementStruct(Ty, getContext());
4044 if (EltType) {
4045 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4046 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4047 getContext().getTypeSize(EltType) == 128) ||
4048 (BT && BT->isFloatingPoint()))
4049 AlignAsType = EltType;
4050 }
4051
4052 // Likewise for ELFv2 homogeneous aggregates.
4053 const Type *Base = nullptr;
4054 uint64_t Members = 0;
4055 if (!AlignAsType && Kind == ELFv2 &&
4056 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4057 AlignAsType = Base;
4058
4059 // With special case aggregates, only vector base types need alignment.
4060 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4061 if (getContext().getTypeSize(AlignAsType) > 128)
4062 return CharUnits::fromQuantity(32);
4063
4064 return CharUnits::fromQuantity(16);
4065 } else if (AlignAsType) {
4066 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4067 }
4068
4069 // Otherwise, we only need alignment for any aggregate type that
4070 // has an alignment requirement of >= 16 bytes.
4071 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4072 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4073 return CharUnits::fromQuantity(32);
4074 return CharUnits::fromQuantity(16);
4075 }
4076
4077 return CharUnits::fromQuantity(8);
4078 }
4079
4080 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4081 /// aggregate. Base is set to the base element type, and Members is set
4082 /// to the number of base elements.
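/// For illustration (a hedged example): a hypothetical 'struct { float v[4]; }'
/// yields Base = float and Members = 4, whereas a struct mixing float and
/// double members is rejected because the member sizes differ.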
4083 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4084 uint64_t &Members) const {
4085 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4086 uint64_t NElements = AT->getSize().getZExtValue();
4087 if (NElements == 0)
4088 return false;
4089 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4090 return false;
4091 Members *= NElements;
4092 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4093 const RecordDecl *RD = RT->getDecl();
4094 if (RD->hasFlexibleArrayMember())
4095 return false;
4096
4097 Members = 0;
4098
4099 // If this is a C++ record, check the bases first.
4100 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4101 for (const auto &I : CXXRD->bases()) {
4102 // Ignore empty records.
4103 if (isEmptyRecord(getContext(), I.getType(), true))
4104 continue;
4105
4106 uint64_t FldMembers;
4107 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4108 return false;
4109
4110 Members += FldMembers;
4111 }
4112 }
4113
4114 for (const auto *FD : RD->fields()) {
4115 // Ignore (non-zero arrays of) empty records.
4116 QualType FT = FD->getType();
4117 while (const ConstantArrayType *AT =
4118 getContext().getAsConstantArrayType(FT)) {
4119 if (AT->getSize().getZExtValue() == 0)
4120 return false;
4121 FT = AT->getElementType();
4122 }
4123 if (isEmptyRecord(getContext(), FT, true))
4124 continue;
4125
4126 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4127 if (getContext().getLangOpts().CPlusPlus &&
4128 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4129 continue;
4130
4131 uint64_t FldMembers;
4132 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4133 return false;
4134
4135 Members = (RD->isUnion() ?
4136 std::max(Members, FldMembers) : Members + FldMembers);
4137 }
4138
4139 if (!Base)
4140 return false;
4141
4142 // Ensure there is no padding.
4143 if (getContext().getTypeSize(Base) * Members !=
4144 getContext().getTypeSize(Ty))
4145 return false;
4146 } else {
4147 Members = 1;
4148 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4149 Members = 2;
4150 Ty = CT->getElementType();
4151 }
4152
4153 // Most ABIs only support float, double, and some vector type widths.
4154 if (!isHomogeneousAggregateBaseType(Ty))
4155 return false;
4156
4157 // The base type must be the same for all members. Types that
4158 // agree in both total size and mode (float vs. vector) are
4159 // treated as being equivalent here.
4160 const Type *TyPtr = Ty.getTypePtr();
4161 if (!Base) {
4162 Base = TyPtr;
4163 // If it's a vector with a non-power-of-2 element count, its allocated
4164 // size is already a power of 2, so widen Base to a vector of that size.
4165 if (const VectorType *VT = Base->getAs<VectorType>()) {
4166 QualType EltTy = VT->getElementType();
4167 unsigned NumElements =
4168 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4169 Base = getContext()
4170 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4171 .getTypePtr();
4172 }
4173 }
4174
4175 if (Base->isVectorType() != TyPtr->isVectorType() ||
4176 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4177 return false;
4178 }
4179 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4180 }
4181
4182 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4183 // Homogeneous aggregates for ELFv2 must have base types of float,
4184 // double, long double, or 128-bit vectors.
4185 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4186 if (BT->getKind() == BuiltinType::Float ||
4187 BT->getKind() == BuiltinType::Double ||
4188 BT->getKind() == BuiltinType::LongDouble)
4189 return true;
4190 }
4191 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4192 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4193 return true;
4194 }
4195 return false;
4196 }
4197
4198 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4199 const Type *Base, uint64_t Members) const {
4200 // Vector types require one register, floating point types require one
4201 // or two registers depending on their size.
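  // For illustration (assumed sizes): 8 float members fit (8 x 1 register),
  // 9 do not; a 128-bit 'long double' member counts as 2 registers.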
4202 uint32_t NumRegs =
4203 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4204
4205 // Homogeneous Aggregates may occupy at most 8 registers.
4206 return Members * NumRegs <= 8;
4207 }
4208
4209 ABIArgInfo
4210 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4211 Ty = useFirstFieldIfTransparentUnion(Ty);
4212
4213 if (Ty->isAnyComplexType())
4214 return ABIArgInfo::getDirect();
4215
4216 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4217 // or via reference (larger than 16 bytes).
4218 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4219 uint64_t Size = getContext().getTypeSize(Ty);
4220 if (Size > 128)
4221 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4222 else if (Size < 128) {
4223 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4224 return ABIArgInfo::getDirect(CoerceTy);
4225 }
4226 }
4227
4228 if (isAggregateTypeForABI(Ty)) {
4229 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4230 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4231
4232 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4233 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4234
4235 // ELFv2 homogeneous aggregates are passed as array types.
4236 const Type *Base = nullptr;
4237 uint64_t Members = 0;
4238 if (Kind == ELFv2 &&
4239 isHomogeneousAggregate(Ty, Base, Members)) {
4240 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4241 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4242 return ABIArgInfo::getDirect(CoerceTy);
4243 }
4244
4245 // If an aggregate may end up fully in registers, we do not
4246 // use the ByVal method, but pass the aggregate as array.
4247 // This is usually beneficial since we avoid forcing the
4248 // back-end to store the argument to memory.
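    // For illustration (hypothetical layouts): a 6-byte struct is coerced to
    // i48 below, and a 20-byte struct with no 16-byte alignment requirement
    // becomes [3 x i64].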
4249 uint64_t Bits = getContext().getTypeSize(Ty);
4250 if (Bits > 0 && Bits <= 8 * GPRBits) {
4251 llvm::Type *CoerceTy;
4252
4253 // Types up to 8 bytes are passed as integer type (which will be
4254 // properly aligned in the argument save area doubleword).
4255 if (Bits <= GPRBits)
4256 CoerceTy =
4257 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4258 // Larger types are passed as arrays, with the base type selected
4259 // according to the required alignment in the save area.
4260 else {
4261 uint64_t RegBits = ABIAlign * 8;
4262 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4263 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4264 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4265 }
4266
4267 return ABIArgInfo::getDirect(CoerceTy);
4268 }
4269
4270 // All other aggregates are passed ByVal.
4271 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4272 /*ByVal=*/true,
4273 /*Realign=*/TyAlign > ABIAlign);
4274 }
4275
4276 return (isPromotableTypeForABI(Ty) ?
4277 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4278 }
4279
4280 ABIArgInfo
4281 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4282 if (RetTy->isVoidType())
4283 return ABIArgInfo::getIgnore();
4284
4285 if (RetTy->isAnyComplexType())
4286 return ABIArgInfo::getDirect();
4287
4288 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4289 // or via reference (larger than 16 bytes).
4290 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4291 uint64_t Size = getContext().getTypeSize(RetTy);
4292 if (Size > 128)
4293 return getNaturalAlignIndirect(RetTy);
4294 else if (Size < 128) {
4295 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4296 return ABIArgInfo::getDirect(CoerceTy);
4297 }
4298 }
4299
4300 if (isAggregateTypeForABI(RetTy)) {
4301 // ELFv2 homogeneous aggregates are returned as array types.
4302 const Type *Base = nullptr;
4303 uint64_t Members = 0;
4304 if (Kind == ELFv2 &&
4305 isHomogeneousAggregate(RetTy, Base, Members)) {
4306 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4307 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4308 return ABIArgInfo::getDirect(CoerceTy);
4309 }
4310
4311 // ELFv2 small aggregates are returned in up to two registers.
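4311a // (e.g. struct { int a; } comes back as i32, struct { int a, b, c; } as
4311b // { i64, i64 }, and an empty struct is ignored entirely.)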
4312 uint64_t Bits = getContext().getTypeSize(RetTy);
4313 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4314 if (Bits == 0)
4315 return ABIArgInfo::getIgnore();
4316
4317 llvm::Type *CoerceTy;
4318 if (Bits > GPRBits) {
4319 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4320 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
4321 } else
4322 CoerceTy =
4323 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4324 return ABIArgInfo::getDirect(CoerceTy);
4325 }
4326
4327 // All other aggregates are returned indirectly.
4328 return getNaturalAlignIndirect(RetTy);
4329 }
4330
4331 return (isPromotableTypeForABI(RetTy) ?
4332 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4333 }
4334
4335 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4336 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4337 QualType Ty) const {
4338 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4339 TypeInfo.second = getParamTypeAlignment(Ty);
4340
4341 CharUnits SlotSize = CharUnits::fromQuantity(8);
4342
4343 // If we have a complex type and the base type is smaller than 8 bytes,
4344 // the ABI calls for the real and imaginary parts to be right-adjusted
4345 // in separate doublewords. However, Clang expects us to produce a
4346 // pointer to a structure with the two parts packed tightly. So generate
4347 // loads of the real and imaginary parts relative to the va_list pointer,
4348 // and store them to a temporary structure.
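4348a // For instance, with a float _Complex on a big-endian target the 4-byte real
4348b // and imaginary parts sit at byte offsets 4 and 12 of their two doublewords;
4348c // we load from there and repack them tightly into the "vacplx" temporary.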
4349 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4350 CharUnits EltSize = TypeInfo.first / 2;
4351 if (EltSize < SlotSize) {
4352 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4353 SlotSize * 2, SlotSize,
4354 SlotSize, /*AllowHigher*/ true);
4355
4356 Address RealAddr = Addr;
4357 Address ImagAddr = RealAddr;
4358 if (CGF.CGM.getDataLayout().isBigEndian()) {
4359 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4360 SlotSize - EltSize);
4361 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4362 2 * SlotSize - EltSize);
4363 } else {
4364 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4365 }
4366
4367 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4368 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4369 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4370 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4371 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4372
4373 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4374 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4375 /*init*/ true);
4376 return Temp;
4377 }
4378 }
4379
4380 // Otherwise, just use the general rule.
4381 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4382 TypeInfo, SlotSize, /*AllowHigher*/ true);
4383 }
4384
4385 static bool
4386 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4387 llvm::Value *Address) {
4388 // This is calculated from the LLVM and GCC tables and verified
4389 // against gcc output. AFAIK all ABIs use the same encoding.
4390
4391 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4392
4393 llvm::IntegerType *i8 = CGF.Int8Ty;
4394 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4395 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4396 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4397
4398 // 0-31: r0-31, the 8-byte general-purpose registers
4399 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4400
4401 // 32-63: fp0-31, the 8-byte floating-point registers
4402 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4403
4404 // 64-76 are various 4-byte special-purpose registers:
4405 // 64: mq
4406 // 65: lr
4407 // 66: ctr
4408 // 67: ap
4409 // 68-75 cr0-7
4410 // 76: xer
4411 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4412
4413 // 77-108: v0-31, the 16-byte vector registers
4414 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4415
4416 // 109: vrsave
4417 // 110: vscr
4418 // 111: spe_acc
4419 // 112: spefscr
4420 // 113: sfp
4421 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4422
4423 return false;
4424 }
4425
4426 bool
4427 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4428 CodeGen::CodeGenFunction &CGF,
4429 llvm::Value *Address) const {
4430
4431 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4432 }
4433
4434 bool
4435 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4436 llvm::Value *Address) const {
4437
4438 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4439 }
4440
4441 //===----------------------------------------------------------------------===//
4442 // AArch64 ABI Implementation
4443 //===----------------------------------------------------------------------===//
4444
4445 namespace {
4446
4447 class AArch64ABIInfo : public SwiftABIInfo {
4448 public:
4449 enum ABIKind {
4450 AAPCS = 0,
4451 DarwinPCS
4452 };
4453
4454 private:
4455 ABIKind Kind;
4456
4457 public:
4458 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4459 : SwiftABIInfo(CGT), Kind(Kind) {}
4460
4461 private:
4462 ABIKind getABIKind() const { return Kind; }
4463 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4464
4465 ABIArgInfo classifyReturnType(QualType RetTy) const;
4466 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4467 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4468 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4469 uint64_t Members) const override;
4470
4471 bool isIllegalVectorType(QualType Ty) const;
4472
4473 void computeInfo(CGFunctionInfo &FI) const override {
4474 if (!getCXXABI().classifyReturnType(FI))
4475 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4476
4477 for (auto &it : FI.arguments())
4478 it.info = classifyArgumentType(it.type);
4479 }
4480
4481 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4482 CodeGenFunction &CGF) const;
4483
4484 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4485 CodeGenFunction &CGF) const;
4486
4487 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4488 QualType Ty) const override {
4489 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4490 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4491 }
4492
4493 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4494 ArrayRef<llvm::Type*> scalars,
4495 bool asReturnValue) const override {
4496 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4497 }
4498 };
4499
4500 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4501 public:
4502 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4503 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4504
4505 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4506 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4507 }
4508
4509 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4510 return 31;
4511 }
4512
4513 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4514 };
4515 }
4516
4517 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4518 Ty = useFirstFieldIfTransparentUnion(Ty);
4519
4520 // Handle illegal vector types here.
4521 if (isIllegalVectorType(Ty)) {
4522 uint64_t Size = getContext().getTypeSize(Ty);
4523 // Android promotes <2 x i8> to i16, not i32
4524 if (isAndroid() && (Size <= 16)) {
4525 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4526 return ABIArgInfo::getDirect(ResType);
4527 }
4528 if (Size <= 32) {
4529 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4530 return ABIArgInfo::getDirect(ResType);
4531 }
4532 if (Size == 64) {
4533 llvm::Type *ResType =
4534 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4535 return ABIArgInfo::getDirect(ResType);
4536 }
4537 if (Size == 128) {
4538 llvm::Type *ResType =
4539 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4540 return ABIArgInfo::getDirect(ResType);
4541 }
4542 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4543 }
4544
4545 if (!isAggregateTypeForABI(Ty)) {
4546 // Treat an enum type as its underlying type.
4547 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4548 Ty = EnumTy->getDecl()->getIntegerType();
4549
4550 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4551 ? ABIArgInfo::getExtend()
4552 : ABIArgInfo::getDirect());
4553 }
4554
4555 // Structures with either a non-trivial destructor or a non-trivial
4556 // copy constructor are always indirect.
4557 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4558 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4559 CGCXXABI::RAA_DirectInMemory);
4560 }
4561
4562 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4563 // elsewhere for GNU compatibility.
4564 if (isEmptyRecord(getContext(), Ty, true)) {
4565 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4566 return ABIArgInfo::getIgnore();
4567
4568 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4569 }
4570
4571 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4572 const Type *Base = nullptr;
4573 uint64_t Members = 0;
4574 if (isHomogeneousAggregate(Ty, Base, Members)) {
4575 return ABIArgInfo::getDirect(
4576 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4577 }
4578
4579 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4580 uint64_t Size = getContext().getTypeSize(Ty);
4581 if (Size <= 128) {
4582 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
4583 // same size and alignment.
4584 if (getTarget().isRenderScriptTarget()) {
4585 return coerceToIntArray(Ty, getContext(), getVMContext());
4586 }
4587 unsigned Alignment = getContext().getTypeAlign(Ty);
4588 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4589
4590 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4591 // For aggregates with 16-byte alignment, we use i128.
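4591a // (e.g. struct { long a; int b; } -> [2 x i64]; struct { __int128 a; } -> i128;
4591b // a 12-byte struct is rounded up to 128 bits and also becomes [2 x i64].)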
4592 if (Alignment < 128 && Size == 128) {
4593 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4594 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4595 }
4596 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4597 }
4598
4599 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4600 }
4601
4602 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4603 if (RetTy->isVoidType())
4604 return ABIArgInfo::getIgnore();
4605
4606 // Large vector types should be returned via memory.
4607 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4608 return getNaturalAlignIndirect(RetTy);
4609
4610 if (!isAggregateTypeForABI(RetTy)) {
4611 // Treat an enum type as its underlying type.
4612 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4613 RetTy = EnumTy->getDecl()->getIntegerType();
4614
4615 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4616 ? ABIArgInfo::getExtend()
4617 : ABIArgInfo::getDirect());
4618 }
4619
4620 if (isEmptyRecord(getContext(), RetTy, true))
4621 return ABIArgInfo::getIgnore();
4622
4623 const Type *Base = nullptr;
4624 uint64_t Members = 0;
4625 if (isHomogeneousAggregate(RetTy, Base, Members))
4626 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4627 return ABIArgInfo::getDirect();
4628
4629 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4630 uint64_t Size = getContext().getTypeSize(RetTy);
4631 if (Size <= 128) {
4632 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
4633 // same size and alignment.
4634 if (getTarget().isRenderScriptTarget()) {
4635 return coerceToIntArray(RetTy, getContext(), getVMContext());
4636 }
4637 unsigned Alignment = getContext().getTypeAlign(RetTy);
4638 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4639
4640 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4641 // For aggregates with 16-byte alignment, we use i128.
4642 if (Alignment < 128 && Size == 128) {
4643 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4644 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4645 }
4646 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4647 }
4648
4649 return getNaturalAlignIndirect(RetTy);
4650 }
4651
4652 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
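4652a /// A vector is treated as legal here when its element count is a power of two
4652b /// and it is either 64 bits wide, or 128 bits wide with more than one element;
4652c /// everything else takes the integer/indirect lowering in classifyArgumentType.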
4653 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4654 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4655 // Check whether VT is legal.
4656 unsigned NumElements = VT->getNumElements();
4657 uint64_t Size = getContext().getTypeSize(VT);
4658 // NumElements should be power of 2.
4659 if (!llvm::isPowerOf2_32(NumElements))
4660 return true;
4661 return Size != 64 && (Size != 128 || NumElements == 1);
4662 }
4663 return false;
4664 }
4665
4666 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4667 // Homogeneous aggregates for AAPCS64 must have base types of a floating
4668 // point type or a short-vector type. This is the same as the 32-bit ABI,
4669 // but with the difference that any floating-point type is allowed,
4670 // including __fp16.
4671 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4672 if (BT->isFloatingPoint())
4673 return true;
4674 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4675 unsigned VecSize = getContext().getTypeSize(VT);
4676 if (VecSize == 64 || VecSize == 128)
4677 return true;
4678 }
4679 return false;
4680 }
4681
4682 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4683 uint64_t Members) const {
4684 return Members <= 4;
4685 }
4686
4687 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
4688 QualType Ty,
4689 CodeGenFunction &CGF) const {
4690 ABIArgInfo AI = classifyArgumentType(Ty);
4691 bool IsIndirect = AI.isIndirect();
4692
4693 llvm::Type *BaseTy = CGF.ConvertType(Ty);
4694 if (IsIndirect)
4695 BaseTy = llvm::PointerType::getUnqual(BaseTy);
4696 else if (AI.getCoerceToType())
4697 BaseTy = AI.getCoerceToType();
4698
4699 unsigned NumRegs = 1;
4700 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4701 BaseTy = ArrTy->getElementType();
4702 NumRegs = ArrTy->getNumElements();
4703 }
4704 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4705
4706 // The AArch64 va_list type and handling is specified in the Procedure Call
4707 // Standard, section B.4:
4708 //
4709 // struct {
4710 // void *__stack;
4711 // void *__gr_top;
4712 // void *__vr_top;
4713 // int __gr_offs;
4714 // int __vr_offs;
4715 // };
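4715a //
4715b // Roughly, __gr_offs / __vr_offs hold the (negative) byte offset from
4715c // __gr_top / __vr_top of the next saved argument register slot; once they
4715d // reach zero or above, the corresponding save area is exhausted and the
4715e // argument is read from __stack instead (see the checks below).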
4716
4717 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4718 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4719 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4720 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4721
4722 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4723 CharUnits TyAlign = TyInfo.second;
4724
4725 Address reg_offs_p = Address::invalid();
4726 llvm::Value *reg_offs = nullptr;
4727 int reg_top_index;
4728 CharUnits reg_top_offset;
4729 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
4730 if (!IsFPR) {
4731 // 3 is the field number of __gr_offs
4732 reg_offs_p =
4733 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
4734 "gr_offs_p");
4735 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4736 reg_top_index = 1; // field number for __gr_top
4737 reg_top_offset = CharUnits::fromQuantity(8);
4738 RegSize = llvm::alignTo(RegSize, 8);
4739 } else {
4740 // 4 is the field number of __vr_offs.
4741 reg_offs_p =
4742 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
4743 "vr_offs_p");
4744 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4745 reg_top_index = 2; // field number for __vr_top
4746 reg_top_offset = CharUnits::fromQuantity(16);
4747 RegSize = 16 * NumRegs;
4748 }
4749
4750 //=======================================
4751 // Find out where argument was passed
4752 //=======================================
4753
4754 // If reg_offs >= 0 we're already using the stack for this type of
4755 // argument. We don't want to keep updating reg_offs (in case it overflows,
4756 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4757 // whatever they get).
4758 llvm::Value *UsingStack = nullptr;
4759 UsingStack = CGF.Builder.CreateICmpSGE(
4760 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4761
4762 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4763
4764 // Otherwise, at least some kind of argument could go in these registers; the
4765 // question is whether this particular type is too big.
4766 CGF.EmitBlock(MaybeRegBlock);
4767
4768 // Integer arguments may need to correct register alignment (for example a
4769 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
4770 // align __gr_offs to calculate the potential address.
4771 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
4772 int Align = TyAlign.getQuantity();
4773
4774 reg_offs = CGF.Builder.CreateAdd(
4775 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
4776 "align_regoffs");
4777 reg_offs = CGF.Builder.CreateAnd(
4778 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
4779 "aligned_regoffs");
4780 }
4781
4782 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
4783 // The fact that this is done unconditionally reflects the fact that
4784 // allocating an argument to the stack also uses up all the remaining
4785 // registers of the appropriate kind.
4786 llvm::Value *NewOffset = nullptr;
4787 NewOffset = CGF.Builder.CreateAdd(
4788 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
4789 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4790
4791 // Now we're in a position to decide whether this argument really was in
4792 // registers or not.
4793 llvm::Value *InRegs = nullptr;
4794 InRegs = CGF.Builder.CreateICmpSLE(
4795 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
4796
4797 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4798
4799 //=======================================
4800 // Argument was in registers
4801 //=======================================
4802
4803 // Now we emit the code for if the argument was originally passed in
4804 // registers. First start the appropriate block:
4805 CGF.EmitBlock(InRegBlock);
4806
4807 llvm::Value *reg_top = nullptr;
4808 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
4809 reg_top_offset, "reg_top_p");
4810 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4811 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
4812 CharUnits::fromQuantity(IsFPR ? 16 : 8));
4813 Address RegAddr = Address::invalid();
4814 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
4815
4816 if (IsIndirect) {
4817 // If it's been passed indirectly (actually a struct), whatever we find from
4818 // stored registers or on the stack will actually be a struct **.
4819 MemTy = llvm::PointerType::getUnqual(MemTy);
4820 }
4821
4822 const Type *Base = nullptr;
4823 uint64_t NumMembers = 0;
4824 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4825 if (IsHFA && NumMembers > 1) {
4826 // Homogeneous aggregates passed in registers will have their elements split
4827 // and stored 16-bytes apart regardless of size (they're notionally in qN,
4828 // qN+1, ...). We reload and store into a temporary local variable
4829 // contiguously.
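4829a // (e.g. an HFA of three floats on a little-endian target: its elements are
4829b // found at reg_offs + 0, +16 and +32 and are copied into a contiguous
4829c // [3 x float] temporary; big-endian additionally right-adjusts each element.)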
4830 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
4831 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
4832 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4833 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4834 Address Tmp = CGF.CreateTempAlloca(HFATy,
4835 std::max(TyAlign, BaseTyInfo.second));
4836
4837 // On big-endian platforms, the value will be right-aligned in its slot.
4838 int Offset = 0;
4839 if (CGF.CGM.getDataLayout().isBigEndian() &&
4840 BaseTyInfo.first.getQuantity() < 16)
4841 Offset = 16 - BaseTyInfo.first.getQuantity();
4842
4843 for (unsigned i = 0; i < NumMembers; ++i) {
4844 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
4845 Address LoadAddr =
4846 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
4847 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
4848
4849 Address StoreAddr =
4850 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
4851
4852 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4853 CGF.Builder.CreateStore(Elem, StoreAddr);
4854 }
4855
4856 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
4857 } else {
4858 // Otherwise the object is contiguous in memory.
4859
4860 // It might be right-aligned in its slot.
4861 CharUnits SlotSize = BaseAddr.getAlignment();
4862 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
4863 (IsHFA || !isAggregateTypeForABI(Ty)) &&
4864 TyInfo.first < SlotSize) {
4865 CharUnits Offset = SlotSize - TyInfo.first;
4866 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
4867 }
4868
4869 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
4870 }
4871
4872 CGF.EmitBranch(ContBlock);
4873
4874 //=======================================
4875 // Argument was on the stack
4876 //=======================================
4877 CGF.EmitBlock(OnStackBlock);
4878
4879 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
4880 CharUnits::Zero(), "stack_p");
4881 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
4882
4883 // Again, stack arguments may need realignment. In this case both integer and
4884 // floating-point ones might be affected.
4885 if (!IsIndirect && TyAlign.getQuantity() > 8) {
4886 int Align = TyAlign.getQuantity();
4887
4888 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
4889
4890 OnStackPtr = CGF.Builder.CreateAdd(
4891 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4892 "align_stack");
4893 OnStackPtr = CGF.Builder.CreateAnd(
4894 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4895 "align_stack");
4896
4897 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
4898 }
4899 Address OnStackAddr(OnStackPtr,
4900 std::max(CharUnits::fromQuantity(8), TyAlign));
4901
4902 // All stack slots are multiples of 8 bytes.
4903 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
4904 CharUnits StackSize;
4905 if (IsIndirect)
4906 StackSize = StackSlotSize;
4907 else
4908 StackSize = TyInfo.first.alignTo(StackSlotSize);
4909
4910 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
4911 llvm::Value *NewStack =
4912 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
4913
4914 // Write the new value of __stack for the next call to va_arg
4915 CGF.Builder.CreateStore(NewStack, stack_p);
4916
4917 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4918 TyInfo.first < StackSlotSize) {
4919 CharUnits Offset = StackSlotSize - TyInfo.first;
4920 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
4921 }
4922
4923 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
4924
4925 CGF.EmitBranch(ContBlock);
4926
4927 //=======================================
4928 // Tidy up
4929 //=======================================
4930 CGF.EmitBlock(ContBlock);
4931
4932 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
4933 OnStackAddr, OnStackBlock, "vaargs.addr");
4934
4935 if (IsIndirect)
4936 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
4937 TyInfo.second);
4938
4939 return ResAddr;
4940 }
4941
4942 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4943 CodeGenFunction &CGF) const {
4944 // The backend's lowering doesn't support va_arg for aggregates or
4945 // illegal vector types. Lower VAArg here for these cases and use
4946 // the LLVM va_arg instruction for everything else.
4947 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
4948 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
4949
4950 CharUnits SlotSize = CharUnits::fromQuantity(8);
4951
4952 // Empty records are ignored for parameter passing purposes.
4953 if (isEmptyRecord(getContext(), Ty, true)) {
4954 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
4955 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
4956 return Addr;
4957 }
4958
4959 // The size of the actual thing passed, which might end up just
4960 // being a pointer for indirect types.
4961 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4962
4963 // Arguments bigger than 16 bytes which aren't homogeneous
4964 // aggregates should be passed indirectly.
4965 bool IsIndirect = false;
4966 if (TyInfo.first.getQuantity() > 16) {
4967 const Type *Base = nullptr;
4968 uint64_t Members = 0;
4969 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
4970 }
4971
4972 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4973 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
4974 }
4975
4976 //===----------------------------------------------------------------------===//
4977 // ARM ABI Implementation
4978 //===----------------------------------------------------------------------===//
4979
4980 namespace {
4981
4982 class ARMABIInfo : public SwiftABIInfo {
4983 public:
4984 enum ABIKind {
4985 APCS = 0,
4986 AAPCS = 1,
4987 AAPCS_VFP = 2,
4988 AAPCS16_VFP = 3,
4989 };
4990
4991 private:
4992 ABIKind Kind;
4993
4994 public:
4995 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
4996 : SwiftABIInfo(CGT), Kind(_Kind) {
4997 setCCs();
4998 }
4999
5000 bool isEABI() const {
5001 switch (getTarget().getTriple().getEnvironment()) {
5002 case llvm::Triple::Android:
5003 case llvm::Triple::EABI:
5004 case llvm::Triple::EABIHF:
5005 case llvm::Triple::GNUEABI:
5006 case llvm::Triple::GNUEABIHF:
5007 case llvm::Triple::MuslEABI:
5008 case llvm::Triple::MuslEABIHF:
5009 return true;
5010 default:
5011 return false;
5012 }
5013 }
5014
5015 bool isEABIHF() const {
5016 switch (getTarget().getTriple().getEnvironment()) {
5017 case llvm::Triple::EABIHF:
5018 case llvm::Triple::GNUEABIHF:
5019 case llvm::Triple::MuslEABIHF:
5020 return true;
5021 default:
5022 return false;
5023 }
5024 }
5025
5026 ABIKind getABIKind() const { return Kind; }
5027
5028 private:
5029 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
5030 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
5031 bool isIllegalVectorType(QualType Ty) const;
5032
5033 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5034 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5035 uint64_t Members) const override;
5036
5037 void computeInfo(CGFunctionInfo &FI) const override;
5038
5039 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5040 QualType Ty) const override;
5041
5042 llvm::CallingConv::ID getLLVMDefaultCC() const;
5043 llvm::CallingConv::ID getABIDefaultCC() const;
5044 void setCCs();
5045
5046 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5047 ArrayRef<llvm::Type*> scalars,
5048 bool asReturnValue) const override {
5049 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5050 }
5051 };
5052
5053 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5054 public:
5055 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5056 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5057
5058 const ARMABIInfo &getABIInfo() const {
5059 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5060 }
5061
5062 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5063 return 13;
5064 }
5065
5066 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5067 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
5068 }
5069
5070 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5071 llvm::Value *Address) const override {
5072 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5073
5074 // 0-15 are the 16 integer registers.
5075 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5076 return false;
5077 }
5078
5079 unsigned getSizeOfUnwindException() const override {
5080 if (getABIInfo().isEABI()) return 88;
5081 return TargetCodeGenInfo::getSizeOfUnwindException();
5082 }
5083
5084 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5085 CodeGen::CodeGenModule &CGM) const override {
5086 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5087 if (!FD)
5088 return;
5089
5090 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5091 if (!Attr)
5092 return;
5093
5094 const char *Kind;
5095 switch (Attr->getInterrupt()) {
5096 case ARMInterruptAttr::Generic: Kind = ""; break;
5097 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5098 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5099 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5100 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5101 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5102 }
5103
5104 llvm::Function *Fn = cast<llvm::Function>(GV);
5105
5106 Fn->addFnAttr("interrupt", Kind);
5107
5108 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5109 if (ABI == ARMABIInfo::APCS)
5110 return;
5111
5112 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5113 // however this is not necessarily true on taking any interrupt. Instruct
5114 // the backend to perform a realignment as part of the function prologue.
5115 llvm::AttrBuilder B;
5116 B.addStackAlignmentAttr(8);
5117 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
5118 llvm::AttributeSet::get(CGM.getLLVMContext(),
5119 llvm::AttributeSet::FunctionIndex,
5120 B));
5121 }
5122 };
5123
5124 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5125 public:
5126 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5127 : ARMTargetCodeGenInfo(CGT, K) {}
5128
5129 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5130 CodeGen::CodeGenModule &CGM) const override;
5131
5132 void getDependentLibraryOption(llvm::StringRef Lib,
5133 llvm::SmallString<24> &Opt) const override {
5134 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5135 }
5136
5137 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5138 llvm::SmallString<32> &Opt) const override {
5139 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5140 }
5141 };
5142
5143 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5144 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5145 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5146 addStackProbeSizeTargetAttribute(D, GV, CGM);
5147 }
5148 }
5149
5150 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5151 if (!getCXXABI().classifyReturnType(FI))
5152 FI.getReturnInfo() =
5153 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5154
5155 for (auto &I : FI.arguments())
5156 I.info = classifyArgumentType(I.type, FI.isVariadic());
5157
5158 // Always honor user-specified calling convention.
5159 if (FI.getCallingConvention() != llvm::CallingConv::C)
5160 return;
5161
5162 llvm::CallingConv::ID cc = getRuntimeCC();
5163 if (cc != llvm::CallingConv::C)
5164 FI.setEffectiveCallingConvention(cc);
5165 }
5166
5167 /// Return the default calling convention that LLVM will use.
5168 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5169 // The default calling convention that LLVM will infer.
5170 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5171 return llvm::CallingConv::ARM_AAPCS_VFP;
5172 else if (isEABI())
5173 return llvm::CallingConv::ARM_AAPCS;
5174 else
5175 return llvm::CallingConv::ARM_APCS;
5176 }
5177
5178 /// Return the calling convention that our ABI would like us to use
5179 /// as the C calling convention.
5180 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5181 switch (getABIKind()) {
5182 case APCS: return llvm::CallingConv::ARM_APCS;
5183 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5184 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5185 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5186 }
5187 llvm_unreachable("bad ABI kind");
5188 }
5189
5190 void ARMABIInfo::setCCs() {
5191 assert(getRuntimeCC() == llvm::CallingConv::C);
5192
5193 // Don't muddy up the IR with a ton of explicit annotations if
5194 // they'd just match what LLVM will infer from the triple.
5195 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5196 if (abiCC != getLLVMDefaultCC())
5197 RuntimeCC = abiCC;
5198
5199 // AAPCS apparently requires runtime support functions to be soft-float, but
5200 // that's almost certainly for historic reasons (Thumb1 not supporting VFP
5201 // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
5202 switch (getABIKind()) {
5203 case APCS:
5204 case AAPCS16_VFP:
5205 if (abiCC != getLLVMDefaultCC())
5206 BuiltinCC = abiCC;
5207 break;
5208 case AAPCS:
5209 case AAPCS_VFP:
5210 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
5211 break;
5212 }
5213 }
5214
5215 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
5216 bool isVariadic) const {
5217 // 6.1.2.1 The following argument types are VFP CPRCs:
5218 // A single-precision floating-point type (including promoted
5219 // half-precision types); A double-precision floating-point type;
5220 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5221 // with a Base Type of a single- or double-precision floating-point type,
5222 // 64-bit containerized vectors or 128-bit containerized vectors with one
5223 // to four Elements.
5224 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5225
5226 Ty = useFirstFieldIfTransparentUnion(Ty);
5227
5228 // Handle illegal vector types here.
5229 if (isIllegalVectorType(Ty)) {
5230 uint64_t Size = getContext().getTypeSize(Ty);
5231 if (Size <= 32) {
5232 llvm::Type *ResType =
5233 llvm::Type::getInt32Ty(getVMContext());
5234 return ABIArgInfo::getDirect(ResType);
5235 }
5236 if (Size == 64) {
5237 llvm::Type *ResType = llvm::VectorType::get(
5238 llvm::Type::getInt32Ty(getVMContext()), 2);
5239 return ABIArgInfo::getDirect(ResType);
5240 }
5241 if (Size == 128) {
5242 llvm::Type *ResType = llvm::VectorType::get(
5243 llvm::Type::getInt32Ty(getVMContext()), 4);
5244 return ABIArgInfo::getDirect(ResType);
5245 }
5246 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5247 }
5248
5249 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5250 // unspecified. This is not done for OpenCL as it handles the half type
5251 // natively, and does not need to interwork with AAPCS code.
5252 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5253 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5254 llvm::Type::getFloatTy(getVMContext()) :
5255 llvm::Type::getInt32Ty(getVMContext());
5256 return ABIArgInfo::getDirect(ResType);
5257 }
5258
5259 if (!isAggregateTypeForABI(Ty)) {
5260 // Treat an enum type as its underlying type.
5261 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5262 Ty = EnumTy->getDecl()->getIntegerType();
5263 }
5264
5265 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5266 : ABIArgInfo::getDirect());
5267 }
5268
5269 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5270 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5271 }
5272
5273 // Ignore empty records.
5274 if (isEmptyRecord(getContext(), Ty, true))
5275 return ABIArgInfo::getIgnore();
5276
5277 if (IsEffectivelyAAPCS_VFP) {
5278 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5279 // into VFP registers.
5280 const Type *Base = nullptr;
5281 uint64_t Members = 0;
5282 if (isHomogeneousAggregate(Ty, Base, Members)) {
5283 assert(Base && "Base class should be set for homogeneous aggregate");
5284 // Base can be a floating-point or a vector.
5285 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5286 }
5287 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5288 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5289 // this convention even for a variadic function: the backend will use GPRs
5290 // if needed.
5291 const Type *Base = nullptr;
5292 uint64_t Members = 0;
5293 if (isHomogeneousAggregate(Ty, Base, Members)) {
5294 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5295 llvm::Type *Ty =
5296 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5297 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5298 }
5299 }
5300
5301 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5302 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5303 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5304 // bigger than 128-bits, they get placed in space allocated by the caller,
5305 // and a pointer is passed.
5306 return ABIArgInfo::getIndirect(
5307 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5308 }
5309
5310 // Support byval for ARM.
5311 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5312 // most 8-byte. We realign the indirect argument if type alignment is bigger
5313 // than ABI alignment.
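5313a // (e.g. under AAPCS a 72-byte struct aligned to 16 bytes gets ABIAlign = 8
5313b // and is passed byval with Realign = true.)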
5314 uint64_t ABIAlign = 4;
5315 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5316 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5317 getABIKind() == ARMABIInfo::AAPCS)
5318 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5319
5320 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5321 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5322 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5323 /*ByVal=*/true,
5324 /*Realign=*/TyAlign > ABIAlign);
5325 }
5326
5327 // On RenderScript, coerce aggregates <= 64 bytes to an integer array of the
5328 // same size and alignment.
5329 if (getTarget().isRenderScriptTarget()) {
5330 return coerceToIntArray(Ty, getContext(), getVMContext());
5331 }
5332
5333 // Otherwise, pass by coercing to a structure of the appropriate size.
5334 llvm::Type* ElemTy;
5335 unsigned SizeRegs;
5336 // FIXME: Try to match the types of the arguments more accurately where
5337 // we can.
5338 if (getContext().getTypeAlign(Ty) <= 32) {
5339 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5340 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5341 } else {
5342 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5343 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5344 }
5345
5346 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
5347 }
5348
5349 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5350 llvm::LLVMContext &VMContext) {
5351 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5352 // is called integer-like if its size is less than or equal to one word, and
5353 // the offset of each of its addressable sub-fields is zero.
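5353a // For example, struct { short s; } and union { int i; char c; } are
5353b // integer-like, while struct { char a, b; } is not, because "b" does not sit
5353c // at offset zero.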
5354
5355 uint64_t Size = Context.getTypeSize(Ty);
5356
5357 // Check that the type fits in a word.
5358 if (Size > 32)
5359 return false;
5360
5361 // FIXME: Handle vector types!
5362 if (Ty->isVectorType())
5363 return false;
5364
5365 // Float types are never treated as "integer like".
5366 if (Ty->isRealFloatingType())
5367 return false;
5368
5369 // If this is a builtin or pointer type then it is ok.
5370 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
5371 return true;
5372
5373 // Small complex integer types are "integer like".
5374 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5375 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5376
5377 // Single element and zero sized arrays should be allowed, by the definition
5378 // above, but they are not.
5379
5380 // Otherwise, it must be a record type.
5381 const RecordType *RT = Ty->getAs<RecordType>();
5382 if (!RT) return false;
5383
5384 // Ignore records with flexible arrays.
5385 const RecordDecl *RD = RT->getDecl();
5386 if (RD->hasFlexibleArrayMember())
5387 return false;
5388
5389 // Check that all sub-fields are at offset 0, and are themselves "integer
5390 // like".
5391 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
5392
5393 bool HadField = false;
5394 unsigned idx = 0;
5395 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5396 i != e; ++i, ++idx) {
5397 const FieldDecl *FD = *i;
5398
5399 // Bit-fields are not addressable, we only need to verify they are "integer
5400 // like". We still have to disallow a subsequent non-bitfield, for example:
5401 // struct { int : 0; int x; }
5402 // is non-integer like according to gcc.
5403 if (FD->isBitField()) {
5404 if (!RD->isUnion())
5405 HadField = true;
5406
5407 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5408 return false;
5409
5410 continue;
5411 }
5412
5413 // Check if this field is at offset 0.
5414 if (Layout.getFieldOffset(idx) != 0)
5415 return false;
5416
5417 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5418 return false;
5419
5420 // Only allow at most one field in a structure. This doesn't match the
5421 // wording above, but follows gcc in situations with a field following an
5422 // empty structure.
5423 if (!RD->isUnion()) {
5424 if (HadField)
5425 return false;
5426
5427 HadField = true;
5428 }
5429 }
5430
5431 return true;
5432 }
5433
5434 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5435 bool isVariadic) const {
5436 bool IsEffectivelyAAPCS_VFP =
5437 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5438
5439 if (RetTy->isVoidType())
5440 return ABIArgInfo::getIgnore();
5441
5442 // Large vector types should be returned via memory.
5443 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5444 return getNaturalAlignIndirect(RetTy);
5445 }
5446
5447 // __fp16 gets returned as if it were an int or float, but with the top 16
5448 // bits unspecified. This is not done for OpenCL as it handles the half type
5449 // natively, and does not need to interwork with AAPCS code.
5450 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5451 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5452 llvm::Type::getFloatTy(getVMContext()) :
5453 llvm::Type::getInt32Ty(getVMContext());
5454 return ABIArgInfo::getDirect(ResType);
5455 }
5456
5457 if (!isAggregateTypeForABI(RetTy)) {
5458 // Treat an enum type as its underlying type.
5459 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5460 RetTy = EnumTy->getDecl()->getIntegerType();
5461
5462 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5463 : ABIArgInfo::getDirect();
5464 }
5465
5466 // Are we following APCS?
5467 if (getABIKind() == APCS) {
5468 if (isEmptyRecord(getContext(), RetTy, false))
5469 return ABIArgInfo::getIgnore();
5470
5471 // Complex types are all returned as packed integers.
5472 //
5473 // FIXME: Consider using 2 x vector types if the back end handles them
5474 // correctly.
5475 if (RetTy->isAnyComplexType())
5476 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5477 getVMContext(), getContext().getTypeSize(RetTy)));
5478
5479 // Integer like structures are returned in r0.
5480 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5481 // Return in the smallest viable integer type.
5482 uint64_t Size = getContext().getTypeSize(RetTy);
5483 if (Size <= 8)
5484 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5485 if (Size <= 16)
5486 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5487 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5488 }
5489
5490 // Otherwise return in memory.
5491 return getNaturalAlignIndirect(RetTy);
5492 }
5493
5494 // Otherwise this is an AAPCS variant.
5495
5496 if (isEmptyRecord(getContext(), RetTy, true))
5497 return ABIArgInfo::getIgnore();
5498
5499 // Check for homogeneous aggregates with AAPCS-VFP.
5500 if (IsEffectivelyAAPCS_VFP) {
5501 const Type *Base = nullptr;
5502 uint64_t Members = 0;
5503 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5504 assert(Base && "Base class should be set for homogeneous aggregate");
5505 // Homogeneous Aggregates are returned directly.
5506 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5507 }
5508 }
5509
5510 // Aggregates <= 4 bytes are returned in r0; other aggregates
5511 // are returned indirectly.
5512 uint64_t Size = getContext().getTypeSize(RetTy);
5513 if (Size <= 32) {
5514 // On RenderScript, coerce aggregates <= 4 bytes to an integer array of the
5515 // same size and alignment.
5516 if (getTarget().isRenderScriptTarget()) {
5517 return coerceToIntArray(RetTy, getContext(), getVMContext());
5518 }
5519 if (getDataLayout().isBigEndian())
5520 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
5521 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5522
5523 // Return in the smallest viable integer type.
5524 if (Size <= 8)
5525 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5526 if (Size <= 16)
5527 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5528 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5529 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5530 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5531 llvm::Type *CoerceTy =
5532 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
5533 return ABIArgInfo::getDirect(CoerceTy);
5534 }
5535
5536 return getNaturalAlignIndirect(RetTy);
5537 }
5538
5539 /// isIllegalVector - check whether Ty is an illegal vector type.
5540 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
5541 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
5542 if (isAndroid()) {
5543 // Android shipped using Clang 3.1, which supported a slightly different
5544 // vector ABI. The primary differences were that 3-element vector types
5545 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
5546 // accepts that legacy behavior for Android only.
5547 // Check whether VT is legal.
5548 unsigned NumElements = VT->getNumElements();
5549 // NumElements should be power of 2 or equal to 3.
5550 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5551 return true;
5552 } else {
5553 // Check whether VT is legal.
5554 unsigned NumElements = VT->getNumElements();
5555 uint64_t Size = getContext().getTypeSize(VT);
5556 // NumElements should be power of 2.
5557 if (!llvm::isPowerOf2_32(NumElements))
5558 return true;
5559 // The total size must be greater than 32 bits; 32-bit-or-smaller vectors are illegal.
5560 return Size <= 32;
5561 }
5562 }
5563 return false;
5564 }
5565
5566 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5567 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
5568 // double, or 64-bit or 128-bit vectors.
5569 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5570 if (BT->getKind() == BuiltinType::Float ||
5571 BT->getKind() == BuiltinType::Double ||
5572 BT->getKind() == BuiltinType::LongDouble)
5573 return true;
5574 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5575 unsigned VecSize = getContext().getTypeSize(VT);
5576 if (VecSize == 64 || VecSize == 128)
5577 return true;
5578 }
5579 return false;
5580 }
5581
5582 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5583 uint64_t Members) const {
5584 return Members <= 4;
5585 }
5586
5587 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5588 QualType Ty) const {
5589 CharUnits SlotSize = CharUnits::fromQuantity(4);
5590
5591 // Empty records are ignored for parameter passing purposes.
5592 if (isEmptyRecord(getContext(), Ty, true)) {
5593 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
5594 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5595 return Addr;
5596 }
5597
5598 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5599 CharUnits TyAlignForABI = TyInfo.second;
5600
5601 // Use indirect if size of the illegal vector is bigger than 16 bytes.
5602 bool IsIndirect = false;
5603 const Type *Base = nullptr;
5604 uint64_t Members = 0;
5605 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
5606 IsIndirect = true;
5607
5608 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
5609 // allocated by the caller.
5610 } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
5611 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5612 !isHomogeneousAggregate(Ty, Base, Members)) {
5613 IsIndirect = true;
5614
5615 // Otherwise, bound the type's ABI alignment.
5616 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5617 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
5618 // Our callers should be prepared to handle an under-aligned address.
5619 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5620 getABIKind() == ARMABIInfo::AAPCS) {
5621 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5622 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
5623 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5624 // ARMv7k allows type alignment up to 16 bytes.
5625 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5626 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
5627 } else {
5628 TyAlignForABI = CharUnits::fromQuantity(4);
5629 }
5630 TyInfo.second = TyAlignForABI;
5631
5632 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
5633 SlotSize, /*AllowHigherAlign*/ true);
5634 }
5635
5636 //===----------------------------------------------------------------------===//
5637 // NVPTX ABI Implementation
5638 //===----------------------------------------------------------------------===//
5639
5640 namespace {
5641
5642 class NVPTXABIInfo : public ABIInfo {
5643 public:
5644 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5645
5646 ABIArgInfo classifyReturnType(QualType RetTy) const;
5647 ABIArgInfo classifyArgumentType(QualType Ty) const;
5648
5649 void computeInfo(CGFunctionInfo &FI) const override;
5650 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5651 QualType Ty) const override;
5652 };
5653
5654 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5655 public:
5656 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5657 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5658
5659 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5660 CodeGen::CodeGenModule &M) const override;
5661 private:
5662 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
5663 // resulting MDNode to the nvvm.annotations MDNode.
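5663a // (e.g. addNVVMMetadata(F, "kernel", 1) appends !{<F>, !"kernel", i32 1}
5663b // to the module-level !nvvm.annotations list.)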
5664 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5665 };
5666
5667 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5668 if (RetTy->isVoidType())
5669 return ABIArgInfo::getIgnore();
5670
5671 // Note: unlike the default ABI, non-scalar return types are returned directly here.
5672 if (!RetTy->isScalarType())
5673 return ABIArgInfo::getDirect();
5674
5675 // Treat an enum type as its underlying type.
5676 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5677 RetTy = EnumTy->getDecl()->getIntegerType();
5678
5679 return (RetTy->isPromotableIntegerType() ?
5680 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5681 }
5682
5683 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5684 // Treat an enum type as its underlying type.
5685 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5686 Ty = EnumTy->getDecl()->getIntegerType();
5687
5688 // Pass aggregate types indirectly, byval.
5689 if (isAggregateTypeForABI(Ty))
5690 return getNaturalAlignIndirect(Ty, /* byval */ true);
5691
5692 return (Ty->isPromotableIntegerType() ?
5693 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5694 }
5695
5696 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5697 if (!getCXXABI().classifyReturnType(FI))
5698 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5699 for (auto &I : FI.arguments())
5700 I.info = classifyArgumentType(I.type);
5701
5702 // Always honor user-specified calling convention.
5703 if (FI.getCallingConvention() != llvm::CallingConv::C)
5704 return;
5705
5706 FI.setEffectiveCallingConvention(getRuntimeCC());
5707 }
5708
5709 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5710 QualType Ty) const {
5711 llvm_unreachable("NVPTX does not support varargs");
5712 }
5713
5714 void NVPTXTargetCodeGenInfo::
5715 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5716                     CodeGen::CodeGenModule &M) const {
5717 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5718 if (!FD) return;
5719
5720 llvm::Function *F = cast<llvm::Function>(GV);
5721
5722 // Perform special handling in OpenCL mode
5723 if (M.getLangOpts().OpenCL) {
5724 // Use OpenCL function attributes to check for kernel functions
5725 // By default, all functions are device functions
5726 if (FD->hasAttr<OpenCLKernelAttr>()) {
5727 // OpenCL __kernel functions get kernel metadata
5728 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5729 addNVVMMetadata(F, "kernel", 1);
5730 // And kernel functions are not subject to inlining
5731 F->addFnAttr(llvm::Attribute::NoInline);
5732 }
5733 }
5734
5735 // Perform special handling in CUDA mode.
5736 if (M.getLangOpts().CUDA) {
5737 // CUDA __global__ functions get a kernel metadata entry. Since
5738 // __global__ functions cannot be called from the device, we do not
5739 // need to set the noinline attribute.
5740 if (FD->hasAttr<CUDAGlobalAttr>()) {
5741 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5742 addNVVMMetadata(F, "kernel", 1);
5743 }
5744 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
5745 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5746 llvm::APSInt MaxThreads(32);
5747 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
5748 if (MaxThreads > 0)
5749 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
5750
5751 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
5752 // not specified in __launch_bounds__ or if the user specified a 0 value,
5753 // we don't have to add a PTX directive.
5754 if (Attr->getMinBlocks()) {
5755 llvm::APSInt MinBlocks(32);
5756 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
5757 if (MinBlocks > 0)
5758 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5759 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
5760 }
5761 }
5762 }
5763 }
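// Illustration (not emitted verbatim by this file): assuming a CUDA kernel
// declared as  __global__ void __launch_bounds__(256, 2) k(),  the handling
// above leaves module-level metadata roughly of the form
//   !nvvm.annotations = !{!0, !1, !2}
//   !0 = !{void ()* @k, !"kernel", i32 1}
//   !1 = !{void ()* @k, !"maxntidx", i32 256}
//   !2 = !{void ()* @k, !"minctasm", i32 2}
// where the function name @k and the bound values are hypothetical examples.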
5764
5765 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5766 int Operand) {
5767 llvm::Module *M = F->getParent();
5768 llvm::LLVMContext &Ctx = M->getContext();
5769
5770 // Get "nvvm.annotations" metadata node
5771 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5772
5773 llvm::Metadata *MDVals[] = {
5774 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5775 llvm::ConstantAsMetadata::get(
5776 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5777 // Append metadata to nvvm.annotations
5778 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
5779 }
5780 }
5781
5782 //===----------------------------------------------------------------------===//
5783 // SystemZ ABI Implementation
5784 //===----------------------------------------------------------------------===//
5785
5786 namespace {
5787
5788 class SystemZABIInfo : public SwiftABIInfo {
5789 bool HasVector;
5790
5791 public:
5792   SystemZABIInfo(CodeGenTypes &CGT, bool HV)
5793 : SwiftABIInfo(CGT), HasVector(HV) {}
5794
5795 bool isPromotableIntegerType(QualType Ty) const;
5796 bool isCompoundType(QualType Ty) const;
5797 bool isVectorArgumentType(QualType Ty) const;
5798 bool isFPArgumentType(QualType Ty) const;
5799 QualType GetSingleElementType(QualType Ty) const;
5800
5801 ABIArgInfo classifyReturnType(QualType RetTy) const;
5802 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
5803
5804   void computeInfo(CGFunctionInfo &FI) const override {
5805 if (!getCXXABI().classifyReturnType(FI))
5806 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5807 for (auto &I : FI.arguments())
5808 I.info = classifyArgumentType(I.type);
5809 }
5810
5811 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5812 QualType Ty) const override;
5813
5814   bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5815 ArrayRef<llvm::Type*> scalars,
5816 bool asReturnValue) const override {
5817 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5818 }
5819 };
5820
5821 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
5822 public:
5823   SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
5824 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
5825 };
5826
5827 }
5828
5829 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5830 // Treat an enum type as its underlying type.
5831 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5832 Ty = EnumTy->getDecl()->getIntegerType();
5833
5834 // Promotable integer types are required to be promoted by the ABI.
5835 if (Ty->isPromotableIntegerType())
5836 return true;
5837
5838 // 32-bit values must also be promoted.
5839 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5840 switch (BT->getKind()) {
5841 case BuiltinType::Int:
5842 case BuiltinType::UInt:
5843 return true;
5844 default:
5845 return false;
5846 }
5847 return false;
5848 }
5849
5850 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5851 return (Ty->isAnyComplexType() ||
5852 Ty->isVectorType() ||
5853 isAggregateTypeForABI(Ty));
5854 }
5855
5856 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
5857 return (HasVector &&
5858 Ty->isVectorType() &&
5859 getContext().getTypeSize(Ty) <= 128);
5860 }
5861
5862 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5863 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5864 switch (BT->getKind()) {
5865 case BuiltinType::Float:
5866 case BuiltinType::Double:
5867 return true;
5868 default:
5869 return false;
5870 }
5871
5872 return false;
5873 }
5874
5875 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
5876 if (const RecordType *RT = Ty->getAsStructureType()) {
5877 const RecordDecl *RD = RT->getDecl();
5878 QualType Found;
5879
5880 // If this is a C++ record, check the bases first.
5881 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5882 for (const auto &I : CXXRD->bases()) {
5883 QualType Base = I.getType();
5884
5885 // Empty bases don't affect things either way.
5886 if (isEmptyRecord(getContext(), Base, true))
5887 continue;
5888
5889 if (!Found.isNull())
5890 return Ty;
5891 Found = GetSingleElementType(Base);
5892 }
5893
5894 // Check the fields.
5895 for (const auto *FD : RD->fields()) {
5896 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5897 // Unlike isSingleElementStruct(), empty structure and array fields
5898 // do count. So do anonymous bitfields that aren't zero-sized.
5899 if (getContext().getLangOpts().CPlusPlus &&
5900 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5901 continue;
5902
5903 // Unlike isSingleElementStruct(), arrays do not count.
5904 // Nested structures still do though.
5905 if (!Found.isNull())
5906 return Ty;
5907 Found = GetSingleElementType(FD->getType());
5908 }
5909
5910 // Unlike isSingleElementStruct(), trailing padding is allowed.
5911 // An 8-byte aligned struct s { float f; } is passed as a double.
5912 if (!Found.isNull())
5913 return Found;
5914 }
5915
5916 return Ty;
5917 }
5918
5919 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5920 QualType Ty) const {
5921 // Assume that va_list type is correct; should be pointer to LLVM type:
5922 // struct {
5923 // i64 __gpr;
5924 // i64 __fpr;
5925 // i8 *__overflow_arg_area;
5926 // i8 *__reg_save_area;
5927 // };
5928
5929 // Every non-vector argument occupies 8 bytes and is passed by preference
5930 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
5931 // always passed on the stack.
5932 Ty = getContext().getCanonicalType(Ty);
5933 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5934 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
5935 llvm::Type *DirectTy = ArgTy;
5936 ABIArgInfo AI = classifyArgumentType(Ty);
5937 bool IsIndirect = AI.isIndirect();
5938 bool InFPRs = false;
5939 bool IsVector = false;
5940 CharUnits UnpaddedSize;
5941 CharUnits DirectAlign;
5942 if (IsIndirect) {
5943 DirectTy = llvm::PointerType::getUnqual(DirectTy);
5944 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
5945 } else {
5946 if (AI.getCoerceToType())
5947 ArgTy = AI.getCoerceToType();
5948 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
5949 IsVector = ArgTy->isVectorTy();
5950 UnpaddedSize = TyInfo.first;
5951 DirectAlign = TyInfo.second;
5952 }
5953 CharUnits PaddedSize = CharUnits::fromQuantity(8);
5954 if (IsVector && UnpaddedSize > PaddedSize)
5955 PaddedSize = CharUnits::fromQuantity(16);
5956 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
5957
5958 CharUnits Padding = (PaddedSize - UnpaddedSize);
5959
5960 llvm::Type *IndexTy = CGF.Int64Ty;
5961 llvm::Value *PaddedSizeV =
5962 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
5963
5964 if (IsVector) {
5965 // Work out the address of a vector argument on the stack.
5966 // Vector arguments are always passed in the high bits of a
5967 // single (8 byte) or double (16 byte) stack slot.
5968 Address OverflowArgAreaPtr =
5969 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
5970 "overflow_arg_area_ptr");
5971 Address OverflowArgArea =
5972 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
5973 TyInfo.second);
5974 Address MemAddr =
5975 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
5976
5977 // Update overflow_arg_area_ptr pointer
5978 llvm::Value *NewOverflowArgArea =
5979 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
5980 "overflow_arg_area");
5981 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5982
5983 return MemAddr;
5984 }
5985
5986 assert(PaddedSize.getQuantity() == 8);
5987
5988 unsigned MaxRegs, RegCountField, RegSaveIndex;
5989 CharUnits RegPadding;
5990 if (InFPRs) {
5991 MaxRegs = 4; // Maximum of 4 FPR arguments
5992 RegCountField = 1; // __fpr
5993 RegSaveIndex = 16; // save offset for f0
5994 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
5995 } else {
5996 MaxRegs = 5; // Maximum of 5 GPR arguments
5997 RegCountField = 0; // __gpr
5998 RegSaveIndex = 2; // save offset for r2
5999 RegPadding = Padding; // values are passed in the low bits of a GPR
6000 }
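  // Worked example (illustrative only): for the third GPR-class vararg,
  // RegCount is 2, so RegOffset = 2 * 8 (scaled count) + 2 * 8 (r2's save
  // slot) + Padding = 32 + Padding bytes into __reg_save_area, i.e. the
  // slot for r4; r2 and r3 already hold the first two register arguments.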
6001
6002 Address RegCountPtr = CGF.Builder.CreateStructGEP(
6003 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
6004 "reg_count_ptr");
6005 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6006 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6007 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
6008 "fits_in_regs");
6009
6010 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6011 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6012 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6013 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6014
6015 // Emit code to load the value if it was passed in registers.
6016 CGF.EmitBlock(InRegBlock);
6017
6018 // Work out the address of an argument register.
6019 llvm::Value *ScaledRegCount =
6020 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6021 llvm::Value *RegBase =
6022 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6023 + RegPadding.getQuantity());
6024 llvm::Value *RegOffset =
6025 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6026 Address RegSaveAreaPtr =
6027 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
6028 "reg_save_area_ptr");
6029 llvm::Value *RegSaveArea =
6030 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
6031 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
6032 "raw_reg_addr"),
6033 PaddedSize);
6034 Address RegAddr =
6035 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6036
6037 // Update the register count
6038 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6039 llvm::Value *NewRegCount =
6040 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6041 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6042 CGF.EmitBranch(ContBlock);
6043
6044 // Emit code to load the value if it was passed in memory.
6045 CGF.EmitBlock(InMemBlock);
6046
6047 // Work out the address of a stack argument.
6048 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
6049 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
6050 Address OverflowArgArea =
6051 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6052 PaddedSize);
6053 Address RawMemAddr =
6054 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6055 Address MemAddr =
6056 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6057
6058 // Update overflow_arg_area_ptr pointer
6059 llvm::Value *NewOverflowArgArea =
6060 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6061 "overflow_arg_area");
6062 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6063 CGF.EmitBranch(ContBlock);
6064
6065 // Return the appropriate result.
6066 CGF.EmitBlock(ContBlock);
6067 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6068 MemAddr, InMemBlock, "va_arg.addr");
6069
6070 if (IsIndirect)
6071 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
6072 TyInfo.second);
6073
6074 return ResAddr;
6075 }
6076
6077 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6078 if (RetTy->isVoidType())
6079 return ABIArgInfo::getIgnore();
6080 if (isVectorArgumentType(RetTy))
6081 return ABIArgInfo::getDirect();
6082 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6083 return getNaturalAlignIndirect(RetTy);
6084 return (isPromotableIntegerType(RetTy) ?
6085 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6086 }
6087
6088 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6089 // Handle the generic C++ ABI.
6090 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6091 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6092
6093 // Integers and enums are extended to full register width.
6094 if (isPromotableIntegerType(Ty))
6095 return ABIArgInfo::getExtend();
6096
6097 // Handle vector types and vector-like structure types. Note that
6098 // as opposed to float-like structure types, we do not allow any
6099 // padding for vector-like structures, so verify the sizes match.
6100 uint64_t Size = getContext().getTypeSize(Ty);
6101 QualType SingleElementTy = GetSingleElementType(Ty);
6102 if (isVectorArgumentType(SingleElementTy) &&
6103 getContext().getTypeSize(SingleElementTy) == Size)
6104 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6105
6106 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6107 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6108 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6109
6110 // Handle small structures.
6111 if (const RecordType *RT = Ty->getAs<RecordType>()) {
6112     // Structures with flexible arrays have variable length, so they really
6113     // fail the size test above.
6114 const RecordDecl *RD = RT->getDecl();
6115 if (RD->hasFlexibleArrayMember())
6116 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6117
6118 // The structure is passed as an unextended integer, a float, or a double.
6119 llvm::Type *PassTy;
6120 if (isFPArgumentType(SingleElementTy)) {
6121 assert(Size == 32 || Size == 64);
6122 if (Size == 32)
6123 PassTy = llvm::Type::getFloatTy(getVMContext());
6124 else
6125 PassTy = llvm::Type::getDoubleTy(getVMContext());
6126 } else
6127 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6128 return ABIArgInfo::getDirect(PassTy);
6129 }
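  // Illustration (not from the original source): struct { float f; } is
  // passed as a plain float, while struct { short a, b; } -- 32 bits with no
  // single floating-point element -- is passed as an unextended i32.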
6130
6131 // Non-structure compounds are passed indirectly.
6132 if (isCompoundType(Ty))
6133 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6134
6135 return ABIArgInfo::getDirect(nullptr);
6136 }
6137
6138 //===----------------------------------------------------------------------===//
6139 // MSP430 ABI Implementation
6140 //===----------------------------------------------------------------------===//
6141
6142 namespace {
6143
6144 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6145 public:
6146   MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6147 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6148 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6149 CodeGen::CodeGenModule &M) const override;
6150 };
6151
6152 }
6153
6154 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
6155 llvm::GlobalValue *GV,
6156 CodeGen::CodeGenModule &M) const {
6157 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6158 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6159 // Handle 'interrupt' attribute:
6160 llvm::Function *F = cast<llvm::Function>(GV);
6161
6162 // Step 1: Set ISR calling convention.
6163 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6164
6165 // Step 2: Add attributes goodness.
6166 F->addFnAttr(llvm::Attribute::NoInline);
6167
6168 // Step 3: Emit ISR vector alias.
6169 unsigned Num = attr->getNumber() / 2;
6170 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
6171 "__isr_" + Twine(Num), F);
6172 }
6173 }
6174 }
6175
6176 //===----------------------------------------------------------------------===//
6177 // MIPS ABI Implementation. This works for both little-endian and
6178 // big-endian variants.
6179 //===----------------------------------------------------------------------===//
6180
6181 namespace {
6182 class MipsABIInfo : public ABIInfo {
6183 bool IsO32;
6184 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6185 void CoerceToIntArgs(uint64_t TySize,
6186 SmallVectorImpl<llvm::Type *> &ArgList) const;
6187 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6188 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6189 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6190 public:
6191   MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6192 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6193 StackAlignInBytes(IsO32 ? 8 : 16) {}
6194
6195 ABIArgInfo classifyReturnType(QualType RetTy) const;
6196 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6197 void computeInfo(CGFunctionInfo &FI) const override;
6198 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6199 QualType Ty) const override;
6200 bool shouldSignExtUnsignedType(QualType Ty) const override;
6201 };
6202
6203 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6204 unsigned SizeOfUnwindException;
6205 public:
6206   MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6207 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6208 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6209
6210   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6211 return 29;
6212 }
6213
6214   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6215 CodeGen::CodeGenModule &CGM) const override {
6216 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6217 if (!FD) return;
6218 llvm::Function *Fn = cast<llvm::Function>(GV);
6219 if (FD->hasAttr<Mips16Attr>()) {
6220 Fn->addFnAttr("mips16");
6221 }
6222 else if (FD->hasAttr<NoMips16Attr>()) {
6223 Fn->addFnAttr("nomips16");
6224 }
6225
6226 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6227 if (!Attr)
6228 return;
6229
6230 const char *Kind;
6231 switch (Attr->getInterrupt()) {
6232 case MipsInterruptAttr::eic: Kind = "eic"; break;
6233 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6234 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6235 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6236 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6237 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6238 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6239 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6240 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6241 }
6242
6243 Fn->addFnAttr("interrupt", Kind);
6244
6245 }
6246
6247 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6248 llvm::Value *Address) const override;
6249
6250   unsigned getSizeOfUnwindException() const override {
6251 return SizeOfUnwindException;
6252 }
6253 };
6254 }
6255
6256 void MipsABIInfo::CoerceToIntArgs(
6257 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6258 llvm::IntegerType *IntTy =
6259 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6260
6261 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
6262 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6263 ArgList.push_back(IntTy);
6264
6265 // If necessary, add one more integer type to ArgList.
6266 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6267
6268 if (R)
6269 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6270 }
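// Sketch of the effect (assuming the N32/N64 slot size of 8 bytes): a 72-bit
// type yields one i64 for the full slot plus an i8 for the 8-bit remainder.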
6271
6272 // In N32/64, an aligned double-precision floating-point field is passed in
6273 // a register.
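// For example (a sketch, assuming N64): struct { double d; int i; } is 128
// bits and coerces to the LLVM struct { double, i64 }, so the aligned double
// can travel in an FPR while the trailing int is folded into a GPR-sized
// integer.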
6274 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6275 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6276
6277 if (IsO32) {
6278 CoerceToIntArgs(TySize, ArgList);
6279 return llvm::StructType::get(getVMContext(), ArgList);
6280 }
6281
6282 if (Ty->isComplexType())
6283 return CGT.ConvertType(Ty);
6284
6285 const RecordType *RT = Ty->getAs<RecordType>();
6286
6287 // Unions/vectors are passed in integer registers.
6288 if (!RT || !RT->isStructureOrClassType()) {
6289 CoerceToIntArgs(TySize, ArgList);
6290 return llvm::StructType::get(getVMContext(), ArgList);
6291 }
6292
6293 const RecordDecl *RD = RT->getDecl();
6294 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6295 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6296
6297 uint64_t LastOffset = 0;
6298 unsigned idx = 0;
6299 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6300
6301 // Iterate over fields in the struct/class and check if there are any aligned
6302 // double fields.
6303 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6304 i != e; ++i, ++idx) {
6305 const QualType Ty = i->getType();
6306 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6307
6308 if (!BT || BT->getKind() != BuiltinType::Double)
6309 continue;
6310
6311 uint64_t Offset = Layout.getFieldOffset(idx);
6312 if (Offset % 64) // Ignore doubles that are not aligned.
6313 continue;
6314
6315 // Add ((Offset - LastOffset) / 64) args of type i64.
6316 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6317 ArgList.push_back(I64);
6318
6319 // Add double type.
6320 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6321 LastOffset = Offset + 64;
6322 }
6323
6324 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6325 ArgList.append(IntArgList.begin(), IntArgList.end());
6326
6327 return llvm::StructType::get(getVMContext(), ArgList);
6328 }
6329
6330 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6331 uint64_t Offset) const {
6332 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6333 return nullptr;
6334
6335 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6336 }
6337
6338 ABIArgInfo
6339 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6340 Ty = useFirstFieldIfTransparentUnion(Ty);
6341
6342 uint64_t OrigOffset = Offset;
6343 uint64_t TySize = getContext().getTypeSize(Ty);
6344 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6345
6346 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6347 (uint64_t)StackAlignInBytes);
6348 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6349 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6350
6351 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6352 // Ignore empty aggregates.
6353 if (TySize == 0)
6354 return ABIArgInfo::getIgnore();
6355
6356 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6357 Offset = OrigOffset + MinABIStackAlignInBytes;
6358 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6359 }
6360
6361 // If we have reached here, aggregates are passed directly by coercing to
6362 // another structure type. Padding is inserted if the offset of the
6363 // aggregate is unaligned.
6364 ABIArgInfo ArgInfo =
6365 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6366 getPaddingType(OrigOffset, CurrOffset));
6367 ArgInfo.setInReg(true);
6368 return ArgInfo;
6369 }
6370
6371 // Treat an enum type as its underlying type.
6372 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6373 Ty = EnumTy->getDecl()->getIntegerType();
6374
6375 // All integral types are promoted to the GPR width.
6376 if (Ty->isIntegralOrEnumerationType())
6377 return ABIArgInfo::getExtend();
6378
6379 return ABIArgInfo::getDirect(
6380 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
6381 }
6382
6383 llvm::Type*
6384 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6385 const RecordType *RT = RetTy->getAs<RecordType>();
6386 SmallVector<llvm::Type*, 8> RTList;
6387
6388 if (RT && RT->isStructureOrClassType()) {
6389 const RecordDecl *RD = RT->getDecl();
6390 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6391 unsigned FieldCnt = Layout.getFieldCount();
6392
6393 // N32/64 returns struct/classes in floating point registers if the
6394 // following conditions are met:
6395 // 1. The size of the struct/class is no larger than 128-bit.
6396 // 2. The struct/class has one or two fields all of which are floating
6397 // point types.
6398 // 3. The offset of the first field is zero (this follows what gcc does).
6399 //
6400 // Any other composite results are returned in integer registers.
6401 //
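    // Illustration (hypothetical types): struct { float f; double d; } meets
    // all three conditions and is returned as the LLVM struct
    // { float, double }, whereas adding an integer field breaks condition 2
    // and the result falls back to the integer coercion below.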
6402 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6403 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6404 for (; b != e; ++b) {
6405 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
6406
6407 if (!BT || !BT->isFloatingPoint())
6408 break;
6409
6410 RTList.push_back(CGT.ConvertType(b->getType()));
6411 }
6412
6413 if (b == e)
6414 return llvm::StructType::get(getVMContext(), RTList,
6415 RD->hasAttr<PackedAttr>());
6416
6417 RTList.clear();
6418 }
6419 }
6420
6421 CoerceToIntArgs(Size, RTList);
6422 return llvm::StructType::get(getVMContext(), RTList);
6423 }
6424
6425 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6426 uint64_t Size = getContext().getTypeSize(RetTy);
6427
6428 if (RetTy->isVoidType())
6429 return ABIArgInfo::getIgnore();
6430
6431 // O32 doesn't treat zero-sized structs differently from other structs.
6432   // However, N32/N64 ignores zero-sized return values.
6433 if (!IsO32 && Size == 0)
6434 return ABIArgInfo::getIgnore();
6435
6436 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6437 if (Size <= 128) {
6438 if (RetTy->isAnyComplexType())
6439 return ABIArgInfo::getDirect();
6440
6441 // O32 returns integer vectors in registers and N32/N64 returns all small
6442 // aggregates in registers.
6443 if (!IsO32 ||
6444 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6445 ABIArgInfo ArgInfo =
6446 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
6447 ArgInfo.setInReg(true);
6448 return ArgInfo;
6449 }
6450 }
6451
6452 return getNaturalAlignIndirect(RetTy);
6453 }
6454
6455 // Treat an enum type as its underlying type.
6456 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6457 RetTy = EnumTy->getDecl()->getIntegerType();
6458
6459 return (RetTy->isPromotableIntegerType() ?
6460 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6461 }
6462
6463 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6464 ABIArgInfo &RetInfo = FI.getReturnInfo();
6465 if (!getCXXABI().classifyReturnType(FI))
6466 RetInfo = classifyReturnType(FI.getReturnType());
6467
6468 // Check if a pointer to an aggregate is passed as a hidden argument.
6469 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6470
6471 for (auto &I : FI.arguments())
6472 I.info = classifyArgumentType(I.type, Offset);
6473 }
6474
6475 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6476 QualType OrigTy) const {
6477 QualType Ty = OrigTy;
6478
6479 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6480 // Pointers are also promoted in the same way but this only matters for N32.
6481 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6482 unsigned PtrWidth = getTarget().getPointerWidth(0);
6483 bool DidPromote = false;
6484 if ((Ty->isIntegerType() &&
6485 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6486 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
6487 DidPromote = true;
6488 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6489 Ty->isSignedIntegerType());
6490 }
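  // For instance (illustrative): reading a 'short' vararg under N64 reads a
  // full 64-bit slot here, and the "unpromote" step below truncates it back
  // to the original width in a temporary.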
6491
6492 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6493
6494 // The alignment of things in the argument area is never larger than
6495 // StackAlignInBytes.
6496 TyInfo.second =
6497 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
6498
6499 // MinABIStackAlignInBytes is the size of argument slots on the stack.
6500 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6501
6502 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6503 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6504
6505
6506 // If there was a promotion, "unpromote" into a temporary.
6507 // TODO: can we just use a pointer into a subset of the original slot?
6508 if (DidPromote) {
6509 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
6510 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
6511
6512 // Truncate down to the right width.
6513 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
6514 : CGF.IntPtrTy);
6515 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
6516 if (OrigTy->isPointerType())
6517 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
6518
6519 CGF.Builder.CreateStore(V, Temp);
6520 Addr = Temp;
6521 }
6522
6523 return Addr;
6524 }
6525
6526 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6527 int TySize = getContext().getTypeSize(Ty);
6528
6529 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
6530 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
6531 return true;
6532
6533 return false;
6534 }
6535
6536 bool
6537 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6538 llvm::Value *Address) const {
6539   // This information comes from gcc's implementation, which seems to be
6540   // as canonical as it gets.
6541
6542 // Everything on MIPS is 4 bytes. Double-precision FP registers
6543 // are aliased to pairs of single-precision FP registers.
6544 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6545
6546 // 0-31 are the general purpose registers, $0 - $31.
6547 // 32-63 are the floating-point registers, $f0 - $f31.
6548 // 64 and 65 are the multiply/divide registers, $hi and $lo.
6549 // 66 is the (notional, I think) register for signal-handler return.
6550 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
6551
6552 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
6553 // They are one bit wide and ignored here.
6554
6555 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
6556 // (coprocessor 1 is the FP unit)
6557 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
6558 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
6559 // 176-181 are the DSP accumulator registers.
6560 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
6561 return false;
6562 }
6563
6564 //===----------------------------------------------------------------------===//
6565 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
6566 // Currently subclassed only to implement custom OpenCL C function attribute
6567 // handling.
6568 //===----------------------------------------------------------------------===//
6569
6570 namespace {
6571
6572 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
6573 public:
6574   TCETargetCodeGenInfo(CodeGenTypes &CGT)
6575 : DefaultTargetCodeGenInfo(CGT) {}
6576
6577 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6578 CodeGen::CodeGenModule &M) const override;
6579 };
6580
6581 void TCETargetCodeGenInfo::setTargetAttributes(
6582 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6583 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6584 if (!FD) return;
6585
6586 llvm::Function *F = cast<llvm::Function>(GV);
6587
6588 if (M.getLangOpts().OpenCL) {
6589 if (FD->hasAttr<OpenCLKernelAttr>()) {
6590 // OpenCL C Kernel functions are not subject to inlining
6591 F->addFnAttr(llvm::Attribute::NoInline);
6592 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
6593 if (Attr) {
6594 // Convert the reqd_work_group_size() attributes to metadata.
6595 llvm::LLVMContext &Context = F->getContext();
6596 llvm::NamedMDNode *OpenCLMetadata =
6597 M.getModule().getOrInsertNamedMetadata(
6598 "opencl.kernel_wg_size_info");
6599
6600 SmallVector<llvm::Metadata *, 5> Operands;
6601 Operands.push_back(llvm::ConstantAsMetadata::get(F));
6602
6603 Operands.push_back(
6604 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6605 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
6606 Operands.push_back(
6607 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6608 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
6609 Operands.push_back(
6610 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6611 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
6612
6613 // Add a boolean constant operand for "required" (true) or "hint"
6614 // (false) for implementing the work_group_size_hint attr later.
6615 // Currently always true as the hint is not yet implemented.
6616 Operands.push_back(
6617 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
6618 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
6619 }
6620 }
6621 }
6622 }
6623
6624 }
6625
6626 //===----------------------------------------------------------------------===//
6627 // Hexagon ABI Implementation
6628 //===----------------------------------------------------------------------===//
6629
6630 namespace {
6631
6632 class HexagonABIInfo : public ABIInfo {
6633
6634
6635 public:
6636   HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6637
6638 private:
6639
6640 ABIArgInfo classifyReturnType(QualType RetTy) const;
6641 ABIArgInfo classifyArgumentType(QualType RetTy) const;
6642
6643 void computeInfo(CGFunctionInfo &FI) const override;
6644
6645 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6646 QualType Ty) const override;
6647 };
6648
6649 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
6650 public:
6651   HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
6652 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
6653
6654   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6655 return 29;
6656 }
6657 };
6658
6659 }
6660
6661 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
6662 if (!getCXXABI().classifyReturnType(FI))
6663 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6664 for (auto &I : FI.arguments())
6665 I.info = classifyArgumentType(I.type);
6666 }
6667
6668 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6669 if (!isAggregateTypeForABI(Ty)) {
6670 // Treat an enum type as its underlying type.
6671 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6672 Ty = EnumTy->getDecl()->getIntegerType();
6673
6674 return (Ty->isPromotableIntegerType() ?
6675 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6676 }
6677
6678 // Ignore empty records.
6679 if (isEmptyRecord(getContext(), Ty, true))
6680 return ABIArgInfo::getIgnore();
6681
6682 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6683 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6684
6685 uint64_t Size = getContext().getTypeSize(Ty);
6686 if (Size > 64)
6687 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6688 // Pass in the smallest viable integer type.
6689 else if (Size > 32)
6690 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6691 else if (Size > 16)
6692 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6693 else if (Size > 8)
6694 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6695 else
6696 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6697 }
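// Sketch of the effect: a 6-byte struct (48 bits) is passed directly as an
// i64, while a 12-byte struct exceeds 64 bits and is passed indirectly
// by value.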
6698
6699 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6700 if (RetTy->isVoidType())
6701 return ABIArgInfo::getIgnore();
6702
6703 // Large vector types should be returned via memory.
6704 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6705 return getNaturalAlignIndirect(RetTy);
6706
6707 if (!isAggregateTypeForABI(RetTy)) {
6708 // Treat an enum type as its underlying type.
6709 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6710 RetTy = EnumTy->getDecl()->getIntegerType();
6711
6712 return (RetTy->isPromotableIntegerType() ?
6713 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6714 }
6715
6716 if (isEmptyRecord(getContext(), RetTy, true))
6717 return ABIArgInfo::getIgnore();
6718
6719 // Aggregates <= 8 bytes are returned in r0; other aggregates
6720 // are returned indirectly.
6721 uint64_t Size = getContext().getTypeSize(RetTy);
6722 if (Size <= 64) {
6723 // Return in the smallest viable integer type.
6724 if (Size <= 8)
6725 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6726 if (Size <= 16)
6727 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6728 if (Size <= 32)
6729 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6730 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6731 }
6732
6733 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
6734 }
6735
6736 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6737 QualType Ty) const {
6738   // FIXME: Someone needs to audit whether this handles alignment correctly.
6739 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6740 getContext().getTypeInfoInChars(Ty),
6741 CharUnits::fromQuantity(4),
6742 /*AllowHigherAlign*/ true);
6743 }
6744
6745 //===----------------------------------------------------------------------===//
6746 // Lanai ABI Implementation
6747 //===----------------------------------------------------------------------===//
6748
6749 namespace {
6750 class LanaiABIInfo : public DefaultABIInfo {
6751 public:
6752   LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6753
6754 bool shouldUseInReg(QualType Ty, CCState &State) const;
6755
6756   void computeInfo(CGFunctionInfo &FI) const override {
6757 CCState State(FI.getCallingConvention());
6758 // Lanai uses 4 registers to pass arguments unless the function has the
6759 // regparm attribute set.
6760 if (FI.getHasRegParm()) {
6761 State.FreeRegs = FI.getRegParm();
6762 } else {
6763 State.FreeRegs = 4;
6764 }
6765
6766 if (!getCXXABI().classifyReturnType(FI))
6767 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6768 for (auto &I : FI.arguments())
6769 I.info = classifyArgumentType(I.type, State);
6770 }
6771
6772 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
6773 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
6774 };
6775 } // end anonymous namespace
6776
6777 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
6778 unsigned Size = getContext().getTypeSize(Ty);
6779 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
6780
6781 if (SizeInRegs == 0)
6782 return false;
6783
6784 if (SizeInRegs > State.FreeRegs) {
6785 State.FreeRegs = 0;
6786 return false;
6787 }
6788
6789 State.FreeRegs -= SizeInRegs;
6790
6791 return true;
6792 }
6793
6794 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
6795 CCState &State) const {
6796 if (!ByVal) {
6797 if (State.FreeRegs) {
6798 --State.FreeRegs; // Non-byval indirects just use one pointer.
6799 return getNaturalAlignIndirectInReg(Ty);
6800 }
6801 return getNaturalAlignIndirect(Ty, false);
6802 }
6803
6804 // Compute the byval alignment.
6805 const unsigned MinABIStackAlignInBytes = 4;
6806 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
6807 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
6808 /*Realign=*/TypeAlign >
6809 MinABIStackAlignInBytes);
6810 }
6811
6812 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
6813 CCState &State) const {
6814 // Check with the C++ ABI first.
6815 const RecordType *RT = Ty->getAs<RecordType>();
6816 if (RT) {
6817 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
6818 if (RAA == CGCXXABI::RAA_Indirect) {
6819 return getIndirectResult(Ty, /*ByVal=*/false, State);
6820 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
6821 return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
6822 }
6823 }
6824
6825 if (isAggregateTypeForABI(Ty)) {
6826 // Structures with flexible arrays are always indirect.
6827 if (RT && RT->getDecl()->hasFlexibleArrayMember())
6828 return getIndirectResult(Ty, /*ByVal=*/true, State);
6829
6830 // Ignore empty structs/unions.
6831 if (isEmptyRecord(getContext(), Ty, true))
6832 return ABIArgInfo::getIgnore();
6833
6834 llvm::LLVMContext &LLVMContext = getVMContext();
6835 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6836 if (SizeInRegs <= State.FreeRegs) {
6837 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
6838 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
6839 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
6840 State.FreeRegs -= SizeInRegs;
6841 return ABIArgInfo::getDirectInReg(Result);
6842 } else {
6843 State.FreeRegs = 0;
6844 }
6845 return getIndirectResult(Ty, true, State);
6846 }
6847
6848 // Treat an enum type as its underlying type.
6849 if (const auto *EnumTy = Ty->getAs<EnumType>())
6850 Ty = EnumTy->getDecl()->getIntegerType();
6851
6852 bool InReg = shouldUseInReg(Ty, State);
6853 if (Ty->isPromotableIntegerType()) {
6854 if (InReg)
6855 return ABIArgInfo::getDirectInReg();
6856 return ABIArgInfo::getExtend();
6857 }
6858 if (InReg)
6859 return ABIArgInfo::getDirectInReg();
6860 return ABIArgInfo::getDirect();
6861 }
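// Illustration (assuming the default budget of 4 registers): an 8-byte
// struct needs 2 registers and is passed as { i32, i32 } in registers, while
// a 24-byte struct needs 6, exhausts the budget, and is passed indirectly
// by value on the stack.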
6862
6863 namespace {
6864 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
6865 public:
6866   LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
6867 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
6868 };
6869 }
6870
6871 //===----------------------------------------------------------------------===//
6872 // AMDGPU ABI Implementation
6873 //===----------------------------------------------------------------------===//
6874
6875 namespace {
6876
6877 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
6878 public:
6879   AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6880 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6881 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6882 CodeGen::CodeGenModule &M) const override;
6883 unsigned getOpenCLKernelCallingConv() const override;
6884 };
6885
6886 }
6887
6888 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
6889 const Decl *D,
6890 llvm::GlobalValue *GV,
6891 CodeGen::CodeGenModule &M) const {
6892 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6893 if (!FD)
6894 return;
6895
6896 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6897 llvm::Function *F = cast<llvm::Function>(GV);
6898 uint32_t NumVGPR = Attr->getNumVGPR();
6899 if (NumVGPR != 0)
6900 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6901 }
6902
6903 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6904 llvm::Function *F = cast<llvm::Function>(GV);
6905 unsigned NumSGPR = Attr->getNumSGPR();
6906 if (NumSGPR != 0)
6907 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6908 }
6909 }
6910
6911
6912 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
6913 return llvm::CallingConv::AMDGPU_KERNEL;
6914 }
6915
6916 //===----------------------------------------------------------------------===//
6917 // SPARC v8 ABI Implementation.
6918 // Based on the SPARC Compliance Definition version 2.4.1.
6919 //
6920 // Ensures that complex values are passed in registers.
6921 //
6922 namespace {
6923 class SparcV8ABIInfo : public DefaultABIInfo {
6924 public:
6925   SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6926
6927 private:
6928 ABIArgInfo classifyReturnType(QualType RetTy) const;
6929 void computeInfo(CGFunctionInfo &FI) const override;
6930 };
6931 } // end anonymous namespace
6932
6933
6934 ABIArgInfo
6935 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
6936 if (Ty->isAnyComplexType()) {
6937 return ABIArgInfo::getDirect();
6938 }
6939 else {
6940 return DefaultABIInfo::classifyReturnType(Ty);
6941 }
6942 }
6943
6944 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6945
6946 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6947 for (auto &Arg : FI.arguments())
6948 Arg.info = classifyArgumentType(Arg.type);
6949 }
6950
6951 namespace {
6952 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
6953 public:
6954   SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
6955 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
6956 };
6957 } // end anonymous namespace
6958
6959 //===----------------------------------------------------------------------===//
6960 // SPARC v9 ABI Implementation.
6961 // Based on the SPARC Compliance Definition version 2.4.1.
6962 //
6963 // Function arguments are mapped to a nominal "parameter array" and promoted
6964 // to registers depending on their type. Each argument occupies 8 or 16 bytes
6965 // in the array; structs larger than 16 bytes are passed indirectly.
6966 //
6967 // One case requires special care:
6968 //
6969 // struct mixed {
6970 // int i;
6971 // float f;
6972 // };
6973 //
6974 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6975 // parameter array, but the int is passed in an integer register, and the float
6976 // is passed in a floating point register. This is represented as two arguments
6977 // with the LLVM IR inreg attribute:
6978 //
6979 // declare void f(i32 inreg %i, float inreg %f)
6980 //
6981 // The code generator will only allocate 4 bytes from the parameter array for
6982 // the inreg arguments. All other arguments are allocated a multiple of 8
6983 // bytes.
6984 //
6985 namespace {
6986 class SparcV9ABIInfo : public ABIInfo {
6987 public:
6988   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6989
6990 private:
6991 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
6992 void computeInfo(CGFunctionInfo &FI) const override;
6993 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6994 QualType Ty) const override;
6995
6996 // Coercion type builder for structs passed in registers. The coercion type
6997 // serves two purposes:
6998 //
6999 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
7000 // in registers.
7001 // 2. Expose aligned floating point elements as first-level elements, so the
7002 // code generator knows to pass them in floating point registers.
7003 //
7004 // We also compute the InReg flag which indicates that the struct contains
7005 // aligned 32-bit floats.
7006 //
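  // For example, 'struct mixed { int i; float f; }' from the comment above
  // coerces to { i32, float }: the low 32 bits are covered by a padding
  // integer, the aligned 32-bit float is exposed as a first-level element,
  // and InReg is set because a float narrower than 64 bits is present.
  // Since that element list matches the original LLVM struct type, the
  // original type is reused as the coercion type.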
7007 struct CoerceBuilder {
7008 llvm::LLVMContext &Context;
7009 const llvm::DataLayout &DL;
7010 SmallVector<llvm::Type*, 8> Elems;
7011 uint64_t Size;
7012 bool InReg;
7013
7014     CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
7015 : Context(c), DL(dl), Size(0), InReg(false) {}
7016
7017 // Pad Elems with integers until Size is ToSize.
7018     void pad(uint64_t ToSize) {
7019 assert(ToSize >= Size && "Cannot remove elements");
7020 if (ToSize == Size)
7021 return;
7022
7023 // Finish the current 64-bit word.
7024 uint64_t Aligned = llvm::alignTo(Size, 64);
7025 if (Aligned > Size && Aligned <= ToSize) {
7026 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
7027 Size = Aligned;
7028 }
7029
7030 // Add whole 64-bit words.
7031 while (Size + 64 <= ToSize) {
7032 Elems.push_back(llvm::Type::getInt64Ty(Context));
7033 Size += 64;
7034 }
7035
7036 // Final in-word padding.
7037 if (Size < ToSize) {
7038 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
7039 Size = ToSize;
7040 }
7041 }
7042
7043 // Add a floating point element at Offset.
7044     void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
7045 // Unaligned floats are treated as integers.
7046 if (Offset % Bits)
7047 return;
7048 // The InReg flag is only required if there are any floats < 64 bits.
7049 if (Bits < 64)
7050 InReg = true;
7051 pad(Offset);
7052 Elems.push_back(Ty);
7053 Size = Offset + Bits;
7054 }
7055
7056 // Add a struct type to the coercion type, starting at Offset (in bits).
7057     void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7058 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7059 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7060 llvm::Type *ElemTy = StrTy->getElementType(i);
7061 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7062 switch (ElemTy->getTypeID()) {
7063 case llvm::Type::StructTyID:
7064 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7065 break;
7066 case llvm::Type::FloatTyID:
7067 addFloat(ElemOffset, ElemTy, 32);
7068 break;
7069 case llvm::Type::DoubleTyID:
7070 addFloat(ElemOffset, ElemTy, 64);
7071 break;
7072 case llvm::Type::FP128TyID:
7073 addFloat(ElemOffset, ElemTy, 128);
7074 break;
7075 case llvm::Type::PointerTyID:
7076 if (ElemOffset % 64 == 0) {
7077 pad(ElemOffset);
7078 Elems.push_back(ElemTy);
7079 Size += 64;
7080 }
7081 break;
7082 default:
7083 break;
7084 }
7085 }
7086 }
7087
7088 // Check if Ty is a usable substitute for the coercion type.
7089     bool isUsableType(llvm::StructType *Ty) const {
7090 return llvm::makeArrayRef(Elems) == Ty->elements();
7091 }
7092
7093 // Get the coercion type as a literal struct type.
7094     llvm::Type *getType() const {
7095 if (Elems.size() == 1)
7096 return Elems.front();
7097 else
7098 return llvm::StructType::get(Context, Elems);
7099 }
7100 };
7101 };
7102 } // end anonymous namespace
7103
7104 ABIArgInfo
7105 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
7106 if (Ty->isVoidType())
7107 return ABIArgInfo::getIgnore();
7108
7109 uint64_t Size = getContext().getTypeSize(Ty);
7110
7111 // Anything too big to fit in registers is passed with an explicit indirect
7112 // pointer / sret pointer.
7113 if (Size > SizeLimit)
7114 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7115
7116 // Treat an enum type as its underlying type.
7117 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7118 Ty = EnumTy->getDecl()->getIntegerType();
7119
7120 // Integer types smaller than a register are extended.
7121 if (Size < 64 && Ty->isIntegerType())
7122 return ABIArgInfo::getExtend();
7123
7124 // Other non-aggregates go in registers.
7125 if (!isAggregateTypeForABI(Ty))
7126 return ABIArgInfo::getDirect();
7127
7128 // If a C++ object has either a non-trivial copy constructor or a non-trivial
7129 // destructor, it is passed with an explicit indirect pointer / sret pointer.
7130 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7131 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7132
7133 // This is a small aggregate type that should be passed in registers.
7134 // Build a coercion type from the LLVM struct type.
7135 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
7136 if (!StrTy)
7137 return ABIArgInfo::getDirect();
7138
7139 CoerceBuilder CB(getVMContext(), getDataLayout());
7140 CB.addStruct(0, StrTy);
7141 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
7142
7143 // Try to use the original type for coercion.
7144 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
7145
7146 if (CB.InReg)
7147 return ABIArgInfo::getDirectInReg(CoerceTy);
7148 else
7149 return ABIArgInfo::getDirect(CoerceTy);
7150 }
7151
7152 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7153 QualType Ty) const {
7154 ABIArgInfo AI = classifyType(Ty, 16 * 8);
7155 llvm::Type *ArgTy = CGT.ConvertType(Ty);
7156 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
7157 AI.setCoerceToType(ArgTy);
7158
7159 CharUnits SlotSize = CharUnits::fromQuantity(8);
7160
7161 CGBuilderTy &Builder = CGF.Builder;
7162 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
7163 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7164
7165 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
7166
7167 Address ArgAddr = Address::invalid();
7168 CharUnits Stride;
7169 switch (AI.getKind()) {
7170 case ABIArgInfo::Expand:
7171 case ABIArgInfo::CoerceAndExpand:
7172 case ABIArgInfo::InAlloca:
7173 llvm_unreachable("Unsupported ABI kind for va_arg");
7174
7175 case ABIArgInfo::Extend: {
7176 Stride = SlotSize;
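// SPARC V9 is big-endian, so a value smaller than the 8-byte slot sits at the
// slot's high-addressed end; e.g. a 4-byte int is read from slot offset 4.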
7177 CharUnits Offset = SlotSize - TypeInfo.first;
7178 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
7179 break;
7180 }
7181
7182 case ABIArgInfo::Direct: {
7183 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
7184 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
7185 ArgAddr = Addr;
7186 break;
7187 }
7188
7189 case ABIArgInfo::Indirect:
7190 Stride = SlotSize;
7191 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
7192 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
7193 TypeInfo.second);
7194 break;
7195
7196 case ABIArgInfo::Ignore:
7197 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
7198 }
7199
7200 // Update VAList.
7201 llvm::Value *NextPtr =
7202 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
7203 Builder.CreateStore(NextPtr, VAListAddr);
7204
7205 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
7206 }
7207
7208 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7209 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
7210 for (auto &I : FI.arguments())
7211 I.info = classifyType(I.type, 16 * 8);
7212 }
7213
7214 namespace {
7215 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
7216 public:
7217 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
7218 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
7219
7220 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
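// DWARF register 14 is %o6, the SPARC stack pointer.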
7221 return 14;
7222 }
7223
7224 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7225 llvm::Value *Address) const override;
7226 };
7227 } // end anonymous namespace
7228
7229 bool
7230 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7231 llvm::Value *Address) const {
7232 // This is calculated from the LLVM and GCC tables and verified
7233 // against gcc output. AFAIK all ABIs use the same encoding.
7234
7235 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7236
7237 llvm::IntegerType *i8 = CGF.Int8Ty;
7238 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
7239 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
7240
7241 // 0-31: the 8-byte general-purpose registers
7242 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
7243
7244 // 32-63: f0-31, the 4-byte floating-point registers
7245 AssignToArrayRange(Builder, Address, Four8, 32, 63);
7246
7247 // Y = 64
7248 // PSR = 65
7249 // WIM = 66
7250 // TBR = 67
7251 // PC = 68
7252 // NPC = 69
7253 // FSR = 70
7254 // CSR = 71
7255 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
7256
7257 // 72-87: d0-15, the 8-byte floating-point registers
7258 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
7259
7260 return false;
7261 }
7262
7263
7264 //===----------------------------------------------------------------------===//
7265 // XCore ABI Implementation
7266 //===----------------------------------------------------------------------===//
7267
7268 namespace {
7269
7270 /// A SmallStringEnc instance is used to build up the TypeString by passing
7271 /// it by reference between functions that append to it.
7272 typedef llvm::SmallString<128> SmallStringEnc;
7273
7274 /// TypeStringCache caches the meta encodings of Types.
7275 ///
7276 /// The reason for caching TypeStrings is twofold:
7277 /// 1. To cache a type's encoding for later uses;
7278 /// 2. As a means to break recursive member type inclusion.
7279 ///
7280 /// A cache Entry can have a Status of:
7281 /// NonRecursive: The type encoding is not recursive;
7282 /// Recursive: The type encoding is recursive;
7283 /// Incomplete: An incomplete TypeString;
7284 /// IncompleteUsed: An incomplete TypeString that has been used in a
7285 /// Recursive type encoding.
7286 ///
7287 /// A NonRecursive entry will have all of its sub-members expanded as fully
7288 /// as possible. Whilst it may contain types which are recursive, the type
7289 /// itself is not recursive and thus its encoding may be safely used whenever
7290 /// the type is encountered.
7291 ///
7292 /// A Recursive entry will have all of its sub-members expanded as fully as
7293 /// possible. The type itself is recursive and it may contain other types which
7294 /// are recursive. The Recursive encoding must not be used during the expansion
7295 /// of a recursive type's recursive branch. For simplicity the code uses
7296 /// IncompleteCount to reject all usage of Recursive encodings for member types.
7297 ///
7298 /// An Incomplete entry is always a RecordType and only encodes its
7299 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
7300 /// are placed into the cache during type expansion as a means to identify and
7301 /// handle recursive inclusion of types as sub-members. If there is recursion
7302 /// the entry becomes IncompleteUsed.
7303 ///
7304 /// During the expansion of a RecordType's members:
7305 ///
7306 /// If the cache contains a NonRecursive encoding for the member type, the
7307 /// cached encoding is used;
7308 ///
7309 /// If the cache contains a Recursive encoding for the member type, the
7310 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
7311 ///
7312 /// If the member is a RecordType, an Incomplete encoding is placed into the
7313 /// cache to break potential recursive inclusion of itself as a sub-member;
7314 ///
7315 /// Once a member RecordType has been expanded, its temporary incomplete
7316 /// entry is removed from the cache. If a Recursive encoding was swapped out
7317 /// it is swapped back in;
7318 ///
7319 /// If an incomplete entry is used to expand a sub-member, the incomplete
7320 /// entry is marked as IncompleteUsed. The cache keeps count of how many
7321 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
7322 ///
7323 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
7324 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
7325 /// Else the member is part of a recursive type and thus the recursion has
7326 /// been exited too soon for the encoding to be correct for the member.
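///
/// For example (illustrative): encoding 'struct S { struct S *next; int v; };'
/// first caches the Incomplete stub "s(S){}" for S; expanding the 'next'
/// member re-uses that stub (marking it IncompleteUsed), yielding roughly
/// "s(S){m(next){p(s(S){})},m(v){si}}", which is then cached as Recursive.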
7327 ///
7328 class TypeStringCache {
7329 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
7330 struct Entry {
7331 std::string Str; // The encoded TypeString for the type.
7332 enum Status State; // Information about the encoding in 'Str'.
7333 std::string Swapped; // A temporary place holder for a Recursive encoding
7334 // during the expansion of RecordType's members.
7335 };
7336 std::map<const IdentifierInfo *, struct Entry> Map;
7337 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
7338 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
7339 public:
7340 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
7341 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
7342 bool removeIncomplete(const IdentifierInfo *ID);
7343 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
7344 bool IsRecursive);
7345 StringRef lookupStr(const IdentifierInfo *ID);
7346 };
7347
7348 /// TypeString encodings for enum & union fields must be ordered.
7349 /// FieldEncoding is a helper for this ordering process.
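/// (Named members order ahead of unnamed ones; ties are broken by comparing
/// the encodings themselves.)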
7350 class FieldEncoding {
7351 bool HasName;
7352 std::string Enc;
7353 public:
7354 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
7355 StringRef str() {return Enc.c_str();}
7356 bool operator<(const FieldEncoding &rhs) const {
7357 if (HasName != rhs.HasName) return HasName;
7358 return Enc < rhs.Enc;
7359 }
7360 };
7361
7362 class XCoreABIInfo : public DefaultABIInfo {
7363 public:
7364 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7365 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7366 QualType Ty) const override;
7367 };
7368
7369 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
7370 mutable TypeStringCache TSC;
7371 public:
7372 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
7373 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
7374 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
7375 CodeGen::CodeGenModule &M) const override;
7376 };
7377
7378 } // End anonymous namespace.
7379
7380 // TODO: this implementation is likely now redundant with the default
7381 // EmitVAArg.
7382 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7383 QualType Ty) const {
7384 CGBuilderTy &Builder = CGF.Builder;
7385
7386 // Get the VAList.
7387 CharUnits SlotSize = CharUnits::fromQuantity(4);
7388 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
7389
7390 // Handle the argument.
7391 ABIArgInfo AI = classifyArgumentType(Ty);
7392 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
7393 llvm::Type *ArgTy = CGT.ConvertType(Ty);
7394 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
7395 AI.setCoerceToType(ArgTy);
7396 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7397
7398 Address Val = Address::invalid();
7399 CharUnits ArgSize = CharUnits::Zero();
7400 switch (AI.getKind()) {
7401 case ABIArgInfo::Expand:
7402 case ABIArgInfo::CoerceAndExpand:
7403 case ABIArgInfo::InAlloca:
7404 llvm_unreachable("Unsupported ABI kind for va_arg");
7405 case ABIArgInfo::Ignore:
7406 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
7407 ArgSize = CharUnits::Zero();
7408 break;
7409 case ABIArgInfo::Extend:
7410 case ABIArgInfo::Direct:
7411 Val = Builder.CreateBitCast(AP, ArgPtrTy);
7412 ArgSize = CharUnits::fromQuantity(
7413 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
7414 ArgSize = ArgSize.alignTo(SlotSize);
7415 break;
7416 case ABIArgInfo::Indirect:
7417 Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
7418 Val = Address(Builder.CreateLoad(Val), TypeAlign);
7419 ArgSize = SlotSize;
7420 break;
7421 }
7422
7423 // Increment the VAList.
7424 if (!ArgSize.isZero()) {
7425 llvm::Value *APN =
7426 Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
7427 Builder.CreateStore(APN, VAListAddr);
7428 }
7429
7430 return Val;
7431 }
7432
7433 /// During the expansion of a RecordType, an incomplete TypeString is placed
7434 /// into the cache as a means to identify and break recursion.
7435 /// If there is a Recursive encoding in the cache, it is swapped out and will
7436 /// be reinserted by removeIncomplete().
7437 /// All other types of encoding should have been used rather than arriving here.
7438 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
7439 std::string StubEnc) {
7440 if (!ID)
7441 return;
7442 Entry &E = Map[ID];
7443 assert( (E.Str.empty() || E.State == Recursive) &&
7444 "Incorrectly use of addIncomplete");
7445 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
7446 E.Swapped.swap(E.Str); // swap out the Recursive
7447 E.Str.swap(StubEnc);
7448 E.State = Incomplete;
7449 ++IncompleteCount;
7450 }
7451
7452 /// Once the RecordType has been expanded, the temporary incomplete TypeString
7453 /// must be removed from the cache.
7454 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
7455 /// Returns true if the RecordType was defined recursively.
7456 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
7457 if (!ID)
7458 return false;
7459 auto I = Map.find(ID);
7460 assert(I != Map.end() && "Entry not present");
7461 Entry &E = I->second;
7462 assert( (E.State == Incomplete ||
7463 E.State == IncompleteUsed) &&
7464 "Entry must be an incomplete type");
7465 bool IsRecursive = false;
7466 if (E.State == IncompleteUsed) {
7467 // We made use of our Incomplete encoding, thus we are recursive.
7468 IsRecursive = true;
7469 --IncompleteUsedCount;
7470 }
7471 if (E.Swapped.empty())
7472 Map.erase(I);
7473 else {
7474 // Swap the Recursive back.
7475 E.Swapped.swap(E.Str);
7476 E.Swapped.clear();
7477 E.State = Recursive;
7478 }
7479 --IncompleteCount;
7480 return IsRecursive;
7481 }
7482
7483 /// Add the encoded TypeString to the cache only if it is NonRecursive or
7484 /// Recursive (viz: all sub-members were expanded as fully as possible).
7485 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
7486 bool IsRecursive) {
7487 if (!ID || IncompleteUsedCount)
7488 return; // No key, or it is an incomplete sub-type, so don't add.
7489 Entry &E = Map[ID];
7490 if (IsRecursive && !E.Str.empty()) {
7491 assert(E.State==Recursive && E.Str.size() == Str.size() &&
7492 "This is not the same Recursive entry");
7493 // The parent container was not recursive after all, so we could have used
7494 // this Recursive sub-member entry, but we assumed the worst when we
7495 // started (viz: IncompleteCount != 0).
7496 return;
7497 }
7498 assert(E.Str.empty() && "Entry already present");
7499 E.Str = Str.str();
7500 E.State = IsRecursive? Recursive : NonRecursive;
7501 }
7502
7503 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
7504 /// are recursively expanding a type (IncompleteCount != 0) and the cached
7505 /// encoding is Recursive, return an empty StringRef.
7506 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
7507 if (!ID)
7508 return StringRef(); // We have no key.
7509 auto I = Map.find(ID);
7510 if (I == Map.end())
7511 return StringRef(); // We have no encoding.
7512 Entry &E = I->second;
7513 if (E.State == Recursive && IncompleteCount)
7514 return StringRef(); // We don't use Recursive encodings for member types.
7515
7516 if (E.State == Incomplete) {
7517 // The incomplete type is being used to break out of recursion.
7518 E.State = IncompleteUsed;
7519 ++IncompleteUsedCount;
7520 }
7521 return E.Str.c_str();
7522 }
7523
7524 /// The XCore ABI includes a type information section that communicates symbol
7525 /// type information to the linker. The linker uses this information to verify
7526 /// safety/correctness of things such as array bounds, pointers, and so on.
7527 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
7528 /// This type information (TypeString) is emitted into metadata for all global
7529 /// symbols: definitions, declarations, functions & variables.
7530 ///
7531 /// The TypeString carries type, qualifier, name, size & value details.
7532 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
7533 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
7534 /// The output is tested by test/CodeGen/xcore-stringtype.c.
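/// For example, a C global 'int g;' yields the TypeString "si", which is
/// attached as an operand of the "xcore.typestrings" named metadata together
/// with the global value itself.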
7535 ///
7536 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
7537 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
7538
7539 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
7540 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
7541 CodeGen::CodeGenModule &CGM) const {
7542 SmallStringEnc Enc;
7543 if (getTypeString(Enc, D, CGM, TSC)) {
7544 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
7545 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
7546 llvm::MDString::get(Ctx, Enc.str())};
7547 llvm::NamedMDNode *MD =
7548 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
7549 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7550 }
7551 }
7552
7553 //===----------------------------------------------------------------------===//
7554 // SPIR ABI Implementation
7555 //===----------------------------------------------------------------------===//
7556
7557 namespace {
7558 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
7559 public:
7560 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
7561 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
7562 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
7563 CodeGen::CodeGenModule &M) const override;
7564 unsigned getOpenCLKernelCallingConv() const override;
7565 };
7566 } // End anonymous namespace.
7567
7568 /// Emit SPIR specific metadata: OpenCL and SPIR version.
7569 void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
7570 CodeGen::CodeGenModule &CGM) const {
7571 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
7572 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
7573 llvm::Module &M = CGM.getModule();
7574 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
7575 // opencl.spir.version named metadata.
7576 llvm::Metadata *SPIRVerElts[] = {
7577 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 2)),
7578 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 0))};
7579 llvm::NamedMDNode *SPIRVerMD =
7580 M.getOrInsertNamedMetadata("opencl.spir.version");
7581 SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
7582 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
7583 // opencl.ocl.version named metadata node.
7584 llvm::Metadata *OCLVerElts[] = {
7585 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
7586 Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
7587 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
7588 Int32Ty, (CGM.getLangOpts().OpenCLVersion % 100) / 10))};
7589 llvm::NamedMDNode *OCLVerMD =
7590 M.getOrInsertNamedMetadata("opencl.ocl.version");
7591 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
7592 }
7593
7594 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
7595 return llvm::CallingConv::SPIR_KERNEL;
7596 }
7597
7598 static bool appendType(SmallStringEnc &Enc, QualType QType,
7599 const CodeGen::CodeGenModule &CGM,
7600 TypeStringCache &TSC);
7601
7602 /// Helper function for appendRecordType().
7603 /// Builds a SmallVector containing the encoded field types in declaration
7604 /// order.
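/// e.g. a plain field 'int x' is encoded as "m(x){si}" and a bit-field
/// 'int y : 4' as "m(y){b(4:si)}".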
7605 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
7606 const RecordDecl *RD,
7607 const CodeGen::CodeGenModule &CGM,
7608 TypeStringCache &TSC) {
7609 for (const auto *Field : RD->fields()) {
7610 SmallStringEnc Enc;
7611 Enc += "m(";
7612 Enc += Field->getName();
7613 Enc += "){";
7614 if (Field->isBitField()) {
7615 Enc += "b(";
7616 llvm::raw_svector_ostream OS(Enc);
7617 OS << Field->getBitWidthValue(CGM.getContext());
7618 Enc += ':';
7619 }
7620 if (!appendType(Enc, Field->getType(), CGM, TSC))
7621 return false;
7622 if (Field->isBitField())
7623 Enc += ')';
7624 Enc += '}';
7625 FE.emplace_back(!Field->getName().empty(), Enc);
7626 }
7627 return true;
7628 }
7629
7630 /// Appends structure and union types to Enc and adds encoding to cache.
7631 /// Recursively calls appendType (via extractFieldType) for each field.
7632 /// Union types have their fields ordered according to the ABI.
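/// e.g. 'struct S { int a; float b; };' is encoded as "s(S){m(a){si},m(b){ft}}".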
7633 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
7634 const CodeGen::CodeGenModule &CGM,
7635 TypeStringCache &TSC, const IdentifierInfo *ID) {
7636 // Append the cached TypeString if we have one.
7637 StringRef TypeString = TSC.lookupStr(ID);
7638 if (!TypeString.empty()) {
7639 Enc += TypeString;
7640 return true;
7641 }
7642
7643 // Start to emit an incomplete TypeString.
7644 size_t Start = Enc.size();
7645 Enc += (RT->isUnionType()? 'u' : 's');
7646 Enc += '(';
7647 if (ID)
7648 Enc += ID->getName();
7649 Enc += "){";
7650
7651 // We collect all encoded fields and order as necessary.
7652 bool IsRecursive = false;
7653 const RecordDecl *RD = RT->getDecl()->getDefinition();
7654 if (RD && !RD->field_empty()) {
7655 // An incomplete TypeString stub is placed in the cache for this RecordType
7656 // so that recursive calls to this RecordType will use it whilst building a
7657 // complete TypeString for this RecordType.
7658 SmallVector<FieldEncoding, 16> FE;
7659 std::string StubEnc(Enc.substr(Start).str());
7660 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
7661 TSC.addIncomplete(ID, std::move(StubEnc));
7662 if (!extractFieldType(FE, RD, CGM, TSC)) {
7663 (void) TSC.removeIncomplete(ID);
7664 return false;
7665 }
7666 IsRecursive = TSC.removeIncomplete(ID);
7667 // The ABI requires unions to be sorted but not structures.
7668 // See FieldEncoding::operator< for sort algorithm.
7669 if (RT->isUnionType())
7670 std::sort(FE.begin(), FE.end());
7671 // We can now complete the TypeString.
7672 unsigned E = FE.size();
7673 for (unsigned I = 0; I != E; ++I) {
7674 if (I)
7675 Enc += ',';
7676 Enc += FE[I].str();
7677 }
7678 }
7679 Enc += '}';
7680 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
7681 return true;
7682 }
7683
7684 /// Appends enum types to Enc and adds the encoding to the cache.
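/// e.g. 'enum E { A = 1, B = 2 };' is encoded as "e(E){m(A){1},m(B){2}}".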
7685 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
7686 TypeStringCache &TSC,
7687 const IdentifierInfo *ID) {
7688 // Append the cached TypeString if we have one.
7689 StringRef TypeString = TSC.lookupStr(ID);
7690 if (!TypeString.empty()) {
7691 Enc += TypeString;
7692 return true;
7693 }
7694
7695 size_t Start = Enc.size();
7696 Enc += "e(";
7697 if (ID)
7698 Enc += ID->getName();
7699 Enc += "){";
7700
7701 // We collect all encoded enumerations and order them alphanumerically.
7702 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
7703 SmallVector<FieldEncoding, 16> FE;
7704 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
7705 ++I) {
7706 SmallStringEnc EnumEnc;
7707 EnumEnc += "m(";
7708 EnumEnc += I->getName();
7709 EnumEnc += "){";
7710 I->getInitVal().toString(EnumEnc);
7711 EnumEnc += '}';
7712 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
7713 }
7714 std::sort(FE.begin(), FE.end());
7715 unsigned E = FE.size();
7716 for (unsigned I = 0; I != E; ++I) {
7717 if (I)
7718 Enc += ',';
7719 Enc += FE[I].str();
7720 }
7721 }
7722 Enc += '}';
7723 TSC.addIfComplete(ID, Enc.substr(Start), false);
7724 return true;
7725 }
7726
7727 /// Appends type's qualifier to Enc.
7728 /// This is done prior to appending the type's encoding.
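/// e.g. a 'const volatile int' is encoded as "cv:si".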
7729 static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
7730 // Qualifiers are emitted in alphabetical order.
7731 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
7732 int Lookup = 0;
7733 if (QT.isConstQualified())
7734 Lookup += 1<<0;
7735 if (QT.isRestrictQualified())
7736 Lookup += 1<<1;
7737 if (QT.isVolatileQualified())
7738 Lookup += 1<<2;
7739 Enc += Table[Lookup];
7740 }
7741
7742 /// Appends built-in types to Enc.
7743 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
7744 const char *EncType;
7745 switch (BT->getKind()) {
7746 case BuiltinType::Void:
7747 EncType = "0";
7748 break;
7749 case BuiltinType::Bool:
7750 EncType = "b";
7751 break;
7752 case BuiltinType::Char_U:
7753 EncType = "uc";
7754 break;
7755 case BuiltinType::UChar:
7756 EncType = "uc";
7757 break;
7758 case BuiltinType::SChar:
7759 EncType = "sc";
7760 break;
7761 case BuiltinType::UShort:
7762 EncType = "us";
7763 break;
7764 case BuiltinType::Short:
7765 EncType = "ss";
7766 break;
7767 case BuiltinType::UInt:
7768 EncType = "ui";
7769 break;
7770 case BuiltinType::Int:
7771 EncType = "si";
7772 break;
7773 case BuiltinType::ULong:
7774 EncType = "ul";
7775 break;
7776 case BuiltinType::Long:
7777 EncType = "sl";
7778 break;
7779 case BuiltinType::ULongLong:
7780 EncType = "ull";
7781 break;
7782 case BuiltinType::LongLong:
7783 EncType = "sll";
7784 break;
7785 case BuiltinType::Float:
7786 EncType = "ft";
7787 break;
7788 case BuiltinType::Double:
7789 EncType = "d";
7790 break;
7791 case BuiltinType::LongDouble:
7792 EncType = "ld";
7793 break;
7794 default:
7795 return false;
7796 }
7797 Enc += EncType;
7798 return true;
7799 }
7800
7801 /// Appends a pointer encoding to Enc before calling appendType for the pointee.
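/// e.g. 'int *' is encoded as "p(si)".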
7802 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
7803 const CodeGen::CodeGenModule &CGM,
7804 TypeStringCache &TSC) {
7805 Enc += "p(";
7806 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
7807 return false;
7808 Enc += ')';
7809 return true;
7810 }
7811
7812 /// Appends array encoding to Enc before calling appendType for the element.
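/// e.g. 'int a[10]' is encoded as "a(10:si)"; an array of unknown size uses
/// NoSizeEnc in place of the dimension, e.g. "a(*:si)" for a global.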
7813 static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
7814 const ArrayType *AT,
7815 const CodeGen::CodeGenModule &CGM,
7816 TypeStringCache &TSC, StringRef NoSizeEnc) {
7817 if (AT->getSizeModifier() != ArrayType::Normal)
7818 return false;
7819 Enc += "a(";
7820 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
7821 CAT->getSize().toStringUnsigned(Enc);
7822 else
7823 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
7824 Enc += ':';
7825 // The Qualifiers should be attached to the type rather than the array.
7826 appendQualifier(Enc, QT);
7827 if (!appendType(Enc, AT->getElementType(), CGM, TSC))
7828 return false;
7829 Enc += ')';
7830 return true;
7831 }
7832
7833 /// Appends a function encoding to Enc, calling appendType for the return type
7834 /// and the arguments.
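/// e.g. 'int f(float)' is encoded as "f{si}(ft)", the variadic
/// 'int g(int, ...)' as "f{si}(si,va)", and 'int h(void)' as "f{si}(0)".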
7835 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
7836 const CodeGen::CodeGenModule &CGM,
7837 TypeStringCache &TSC) {
7838 Enc += "f{";
7839 if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
7840 return false;
7841 Enc += "}(";
7842 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
7843 // N.B. we are only interested in the adjusted param types.
7844 auto I = FPT->param_type_begin();
7845 auto E = FPT->param_type_end();
7846 if (I != E) {
7847 do {
7848 if (!appendType(Enc, *I, CGM, TSC))
7849 return false;
7850 ++I;
7851 if (I != E)
7852 Enc += ',';
7853 } while (I != E);
7854 if (FPT->isVariadic())
7855 Enc += ",va";
7856 } else {
7857 if (FPT->isVariadic())
7858 Enc += "va";
7859 else
7860 Enc += '0';
7861 }
7862 }
7863 Enc += ')';
7864 return true;
7865 }
7866
7867 /// Handles the type's qualifier before dispatching a call to handle specific
7868 /// type encodings.
7869 static bool appendType(SmallStringEnc &Enc, QualType QType,
7870 const CodeGen::CodeGenModule &CGM,
7871 TypeStringCache &TSC) {
7872
7873 QualType QT = QType.getCanonicalType();
7874
7875 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
7876 // The Qualifiers should be attached to the type rather than the array.
7877 // Thus we don't call appendQualifier() here.
7878 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
7879
7880 appendQualifier(Enc, QT);
7881
7882 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
7883 return appendBuiltinType(Enc, BT);
7884
7885 if (const PointerType *PT = QT->getAs<PointerType>())
7886 return appendPointerType(Enc, PT, CGM, TSC);
7887
7888 if (const EnumType *ET = QT->getAs<EnumType>())
7889 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
7890
7891 if (const RecordType *RT = QT->getAsStructureType())
7892 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
7893
7894 if (const RecordType *RT = QT->getAsUnionType())
7895 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
7896
7897 if (const FunctionType *FT = QT->getAs<FunctionType>())
7898 return appendFunctionType(Enc, FT, CGM, TSC);
7899
7900 return false;
7901 }
7902
7903 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
7904 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
7905 if (!D)
7906 return false;
7907
7908 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
7909 if (FD->getLanguageLinkage() != CLanguageLinkage)
7910 return false;
7911 return appendType(Enc, FD->getType(), CGM, TSC);
7912 }
7913
7914 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
7915 if (VD->getLanguageLinkage() != CLanguageLinkage)
7916 return false;
7917 QualType QT = VD->getType().getCanonicalType();
7918 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
7919 // Global ArrayTypes are given a size of '*' if the size is unknown.
7920 // The Qualifiers should be attached to the type rather than the array.
7921 // Thus we don't call appendQualifier() here.
7922 return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
7923 }
7924 return appendType(Enc, QT, CGM, TSC);
7925 }
7926 return false;
7927 }
7928
7929
7930 //===----------------------------------------------------------------------===//
7931 // Driver code
7932 //===----------------------------------------------------------------------===//
7933
7934 const llvm::Triple &CodeGenModule::getTriple() const {
7935 return getTarget().getTriple();
7936 }
7937
7938 bool CodeGenModule::supportsCOMDAT() const {
7939 return getTriple().supportsCOMDAT();
7940 }
7941
7942 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
7943 if (TheTargetCodeGenInfo)
7944 return *TheTargetCodeGenInfo;
7945
7946 // Helper to set the unique_ptr while still keeping the return value.
7947 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
7948 this->TheTargetCodeGenInfo.reset(P);
7949 return *P;
7950 };
7951
7952 const llvm::Triple &Triple = getTarget().getTriple();
7953 switch (Triple.getArch()) {
7954 default:
7955 return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
7956
7957 case llvm::Triple::le32:
7958 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
7959 case llvm::Triple::mips:
7960 case llvm::Triple::mipsel:
7961 if (Triple.getOS() == llvm::Triple::NaCl)
7962 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
7963 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
7964
7965 case llvm::Triple::mips64:
7966 case llvm::Triple::mips64el:
7967 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
7968
7969 case llvm::Triple::aarch64:
7970 case llvm::Triple::aarch64_be: {
7971 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
7972 if (getTarget().getABI() == "darwinpcs")
7973 Kind = AArch64ABIInfo::DarwinPCS;
7974
7975 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
7976 }
7977
7978 case llvm::Triple::wasm32:
7979 case llvm::Triple::wasm64:
7980 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
7981
7982 case llvm::Triple::arm:
7983 case llvm::Triple::armeb:
7984 case llvm::Triple::thumb:
7985 case llvm::Triple::thumbeb: {
7986 if (Triple.getOS() == llvm::Triple::Win32) {
7987 return SetCGInfo(
7988 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
7989 }
7990
7991 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
7992 StringRef ABIStr = getTarget().getABI();
7993 if (ABIStr == "apcs-gnu")
7994 Kind = ARMABIInfo::APCS;
7995 else if (ABIStr == "aapcs16")
7996 Kind = ARMABIInfo::AAPCS16_VFP;
7997 else if (CodeGenOpts.FloatABI == "hard" ||
7998 (CodeGenOpts.FloatABI != "soft" &&
7999 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
8000 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
8001 Triple.getEnvironment() == llvm::Triple::EABIHF)))
8002 Kind = ARMABIInfo::AAPCS_VFP;
8003
8004 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
8005 }
8006
8007 case llvm::Triple::ppc:
8008 return SetCGInfo(
8009 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
8010 case llvm::Triple::ppc64:
8011 if (Triple.isOSBinFormatELF()) {
8012 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
8013 if (getTarget().getABI() == "elfv2")
8014 Kind = PPC64_SVR4_ABIInfo::ELFv2;
8015 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
8016
8017 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
8018 } else
8019 return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
8020 case llvm::Triple::ppc64le: {
8021 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
8022 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
8023 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
8024 Kind = PPC64_SVR4_ABIInfo::ELFv1;
8025 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
8026
8027 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
8028 }
8029
8030 case llvm::Triple::nvptx:
8031 case llvm::Triple::nvptx64:
8032 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
8033
8034 case llvm::Triple::msp430:
8035 return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
8036
8037 case llvm::Triple::systemz: {
8038 bool HasVector = getTarget().getABI() == "vector";
8039 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
8040 }
8041
8042 case llvm::Triple::tce:
8043 return SetCGInfo(new TCETargetCodeGenInfo(Types));
8044
8045 case llvm::Triple::x86: {
8046 bool IsDarwinVectorABI = Triple.isOSDarwin();
8047 bool RetSmallStructInRegABI =
8048 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
8049 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
8050
8051 if (Triple.getOS() == llvm::Triple::Win32) {
8052 return SetCGInfo(new WinX86_32TargetCodeGenInfo(
8053 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8054 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
8055 } else {
8056 return SetCGInfo(new X86_32TargetCodeGenInfo(
8057 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8058 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
8059 CodeGenOpts.FloatABI == "soft"));
8060 }
8061 }
8062
8063 case llvm::Triple::x86_64: {
8064 StringRef ABI = getTarget().getABI();
8065 X86AVXABILevel AVXLevel =
8066 (ABI == "avx512"
8067 ? X86AVXABILevel::AVX512
8068 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
8069
8070 switch (Triple.getOS()) {
8071 case llvm::Triple::Win32:
8072 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
8073 case llvm::Triple::PS4:
8074 return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
8075 default:
8076 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
8077 }
8078 }
8079 case llvm::Triple::hexagon:
8080 return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
8081 case llvm::Triple::lanai:
8082 return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
8083 case llvm::Triple::r600:
8084 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
8085 case llvm::Triple::amdgcn:
8086 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
8087 case llvm::Triple::sparc:
8088 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
8089 case llvm::Triple::sparcv9:
8090 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
8091 case llvm::Triple::xcore:
8092 return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
8093 case llvm::Triple::spir:
8094 case llvm::Triple::spir64:
8095 return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
8096 }
8097 }
8098