1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This provides C++ code generation targeting the Itanium C++ ABI. The class
11 // in this file generates structures that follow the Itanium C++ ABI, which is
12 // documented at:
13 // http://www.codesourcery.com/public/cxx-abi/abi.html
14 // http://www.codesourcery.com/public/cxx-abi/abi-eh.html
15 //
16 // It also supports the closely-related ARM ABI, documented at:
17 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
18 //
19 //===----------------------------------------------------------------------===//
20
21 #include "CGCXXABI.h"
22 #include "CGRecordLayout.h"
23 #include "CodeGenFunction.h"
24 #include "CodeGenModule.h"
25 #include "clang/AST/Mangle.h"
26 #include "clang/AST/Type.h"
27 #include "llvm/Intrinsics.h"
28 #include "llvm/Target/TargetData.h"
29 #include "llvm/Value.h"
30
31 using namespace clang;
32 using namespace CodeGen;
33
34 namespace {
35 class ItaniumCXXABI : public CodeGen::CGCXXABI {
36 private:
37 llvm::IntegerType *PtrDiffTy;
38 protected:
39 bool IsARM;
40
41 // It's a little silly for us to cache this.
42 llvm::IntegerType *getPtrDiffTy() {
43 if (!PtrDiffTy) {
44 QualType T = getContext().getPointerDiffType();
45 llvm::Type *Ty = CGM.getTypes().ConvertType(T);
46 PtrDiffTy = cast<llvm::IntegerType>(Ty);
47 }
48 return PtrDiffTy;
49 }
50
51 bool NeedsArrayCookie(const CXXNewExpr *expr);
52 bool NeedsArrayCookie(const CXXDeleteExpr *expr,
53 QualType elementType);
54
55 public:
56 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
57 CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
58
59 bool isZeroInitializable(const MemberPointerType *MPT);
60
61 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
62
63 llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
64 llvm::Value *&This,
65 llvm::Value *MemFnPtr,
66 const MemberPointerType *MPT);
67
68 llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
69 llvm::Value *Base,
70 llvm::Value *MemPtr,
71 const MemberPointerType *MPT);
72
73 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
74 const CastExpr *E,
75 llvm::Value *Src);
76
77 llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
78 const CastExpr *E);
79
80 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
81
82 llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
83 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
84 CharUnits offset);
85
86 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
87 llvm::Value *L,
88 llvm::Value *R,
89 const MemberPointerType *MPT,
90 bool Inequality);
91
92 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
93 llvm::Value *Addr,
94 const MemberPointerType *MPT);
95
96 void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
97 CXXCtorType T,
98 CanQualType &ResTy,
99 llvm::SmallVectorImpl<CanQualType> &ArgTys);
100
101 void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
102 CXXDtorType T,
103 CanQualType &ResTy,
104 llvm::SmallVectorImpl<CanQualType> &ArgTys);
105
106 void BuildInstanceFunctionParams(CodeGenFunction &CGF,
107 QualType &ResTy,
108 FunctionArgList &Params);
109
110 void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
111
112 CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
113 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
114 llvm::Value *NewPtr,
115 llvm::Value *NumElements,
116 const CXXNewExpr *expr,
117 QualType ElementType);
118 void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
119 const CXXDeleteExpr *expr,
120 QualType ElementType, llvm::Value *&NumElements,
121 llvm::Value *&AllocPtr, CharUnits &CookieSize);
122
123 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
124 llvm::GlobalVariable *DeclPtr);
125 };
126
127 class ARMCXXABI : public ItaniumCXXABI {
128 public:
129 ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}
130
131 void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
132 CXXCtorType T,
133 CanQualType &ResTy,
134 llvm::SmallVectorImpl<CanQualType> &ArgTys);
135
136 void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
137 CXXDtorType T,
138 CanQualType &ResTy,
139 llvm::SmallVectorImpl<CanQualType> &ArgTys);
140
141 void BuildInstanceFunctionParams(CodeGenFunction &CGF,
142 QualType &ResTy,
143 FunctionArgList &Params);
144
145 void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
146
147 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
148
149 CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
150 llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
151 llvm::Value *NewPtr,
152 llvm::Value *NumElements,
153 const CXXNewExpr *expr,
154 QualType ElementType);
155 void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
156 const CXXDeleteExpr *expr,
157 QualType ElementType, llvm::Value *&NumElements,
158 llvm::Value *&AllocPtr, CharUnits &CookieSize);
159
160 private:
161 /// \brief Returns true if the given instance method is one of the
162 /// kinds that the ARM ABI says returns 'this'.
163 static bool HasThisReturn(GlobalDecl GD) {
164 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
165 return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
166 (isa<CXXConstructorDecl>(MD)));
167 }
168 };
169 }
170
171 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
172 return new ItaniumCXXABI(CGM);
173 }
174
175 CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
176 return new ARMCXXABI(CGM);
177 }
178
179 llvm::Type *
180 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
181 if (MPT->isMemberDataPointer())
182 return getPtrDiffTy();
183 return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
184 }
185
186 /// In the Itanium and ARM ABIs, method pointers have the form:
187 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
188 ///
189 /// In the Itanium ABI:
190 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
191 /// - the this-adjustment is (memptr.adj)
192 /// - the virtual offset is (memptr.ptr - 1)
193 ///
194 /// In the ARM ABI:
195 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
196 /// - the this-adjustment is (memptr.adj >> 1)
197 /// - the virtual offset is (memptr.ptr)
198 /// ARM uses 'adj' for the virtual flag because Thumb functions
199 /// may be only single-byte aligned.
200 ///
201 /// If the member is virtual, the adjusted 'this' pointer points
202 /// to a vtable pointer from which the virtual offset is applied.
203 ///
204 /// If the member is non-virtual, memptr.ptr is the address of
205 /// the function to call.
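///
/// As a worked example (a sketch assuming a 64-bit target with 8-byte
/// pointers and no this-adjustment), a pointer to a virtual method in
/// vtable slot 2 is encoded as:
///   Itanium: { ptr = 2*8 + 1 = 17, adj = 0 }
///   ARM:     { ptr = 2*8     = 16, adj = (0 << 1) | 1 = 1 }
/// while a pointer to a non-virtual method stores the function address
/// in 'ptr' and the this-adjustment in 'adj' (shifted left by 1 on ARM).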
206 llvm::Value *
207 ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
208 llvm::Value *&This,
209 llvm::Value *MemFnPtr,
210 const MemberPointerType *MPT) {
211 CGBuilderTy &Builder = CGF.Builder;
212
213 const FunctionProtoType *FPT =
214 MPT->getPointeeType()->getAs<FunctionProtoType>();
215 const CXXRecordDecl *RD =
216 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
217
218 llvm::FunctionType *FTy =
219 CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
220 FPT->isVariadic());
221
222 llvm::IntegerType *ptrdiff = getPtrDiffTy();
223 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
224
225 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
226 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
227 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
228
229 // Extract memptr.adj, which is in the second field.
230 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
231
232 // Compute the true adjustment.
233 llvm::Value *Adj = RawAdj;
234 if (IsARM)
235 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
236
237 // Apply the adjustment and cast back to the original struct type
238 // for consistency.
239 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
240 Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
241 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
242
243 // Load the function pointer.
244 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
245
246 // If the LSB in the function pointer is 1, the function pointer points to
247 // a virtual function.
248 llvm::Value *IsVirtual;
249 if (IsARM)
250 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
251 else
252 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
253 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
254 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
255
256 // In the virtual path, the adjustment left 'This' pointing to the
257 // vtable of the correct base subobject. The "function pointer" is an
258 // offset within the vtable (+1 for the virtual flag on non-ARM).
259 CGF.EmitBlock(FnVirtual);
260
261 // Cast the adjusted this to a pointer to vtable pointer and load.
262 llvm::Type *VTableTy = Builder.getInt8PtrTy();
263 llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
264 VTable = Builder.CreateLoad(VTable, "memptr.vtable");
265
266 // Apply the offset.
267 llvm::Value *VTableOffset = FnAsInt;
268 if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
269 VTable = Builder.CreateGEP(VTable, VTableOffset);
270
271 // Load the virtual function to call.
272 VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
273 llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
274 CGF.EmitBranch(FnEnd);
275
276 // In the non-virtual path, the 'ptr' field is itself the function
277 // pointer to call.
278 CGF.EmitBlock(FnNonVirtual);
279 llvm::Value *NonVirtualFn =
280 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
281
282 // We're done.
283 CGF.EmitBlock(FnEnd);
284 llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
285 Callee->addIncoming(VirtualFn, FnVirtual);
286 Callee->addIncoming(NonVirtualFn, FnNonVirtual);
287 return Callee;
288 }
289
290 /// Compute an l-value by applying the given pointer-to-member to a
291 /// base object.
292 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
293 llvm::Value *Base,
294 llvm::Value *MemPtr,
295 const MemberPointerType *MPT) {
296 assert(MemPtr->getType() == getPtrDiffTy());
297
298 CGBuilderTy &Builder = CGF.Builder;
299
300 unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
301
302 // Cast to char*.
303 Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
304
305 // Apply the offset, which we assume is non-null.
306 llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
307
308 // Cast the address to the appropriate pointer type, adopting the
309 // address space of the base pointer.
310 llvm::Type *PType
311 = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
312 return Builder.CreateBitCast(Addr, PType);
313 }
314
315 /// Perform a derived-to-base or base-to-derived member pointer conversion.
316 ///
317 /// Obligatory offset/adjustment diagram:
318 /// <-- offset --> <-- adjustment -->
319 /// |--------------------------|----------------------|--------------------|
320 /// ^Derived address point ^Base address point ^Member address point
321 ///
322 /// So when converting a base member pointer to a derived member pointer,
323 /// we add the offset to the adjustment because the address point has
324 /// decreased; and conversely, when converting a derived MP to a base MP
325 /// we subtract the offset from the adjustment because the address point
326 /// has increased.
327 ///
328 /// The standard forbids (at compile time) conversion to and from
329 /// virtual bases, which is why we don't have to consider them here.
330 ///
331 /// The standard forbids (at run time) casting a derived MP to a base
332 /// MP when the derived MP does not point to a member of the base.
333 /// This is why -1 is a reasonable choice for null data member
334 /// pointers.
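///
/// A small worked example (hypothetical layout): if the Base subobject
/// lives at offset 8 within Derived, then
///   - a Base data member pointer with value 4 converts to a Derived data
///     member pointer with value 4 + 8 = 12 (and back by subtracting 8);
///   - a null data member pointer (-1) is left untouched by the null
///     check below;
///   - for method pointers the same +/- 8 is applied to 'adj' instead
///     (doubled to 16 on ARM, where 'adj' is shifted left by one).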
335 llvm::Value *
336 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
337 const CastExpr *E,
338 llvm::Value *Src) {
339 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
340 E->getCastKind() == CK_BaseToDerivedMemberPointer);
341
342 if (isa<llvm::Constant>(Src))
343 return EmitMemberPointerConversion(cast<llvm::Constant>(Src), E);
344
345 CGBuilderTy &Builder = CGF.Builder;
346
347 const MemberPointerType *SrcTy =
348 E->getSubExpr()->getType()->getAs<MemberPointerType>();
349 const MemberPointerType *DestTy = E->getType()->getAs<MemberPointerType>();
350
351 const CXXRecordDecl *SrcDecl = SrcTy->getClass()->getAsCXXRecordDecl();
352 const CXXRecordDecl *DestDecl = DestTy->getClass()->getAsCXXRecordDecl();
353
354 bool DerivedToBase =
355 E->getCastKind() == CK_DerivedToBaseMemberPointer;
356
357 const CXXRecordDecl *DerivedDecl;
358 if (DerivedToBase)
359 DerivedDecl = SrcDecl;
360 else
361 DerivedDecl = DestDecl;
362
363 llvm::Constant *Adj =
364 CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
365 E->path_begin(),
366 E->path_end());
367 if (!Adj) return Src;
368
369 // For member data pointers, this is just a matter of adding the
370 // offset if the source is non-null.
371 if (SrcTy->isMemberDataPointer()) {
372 llvm::Value *Dst;
373 if (DerivedToBase)
374 Dst = Builder.CreateNSWSub(Src, Adj, "adj");
375 else
376 Dst = Builder.CreateNSWAdd(Src, Adj, "adj");
377
378 // Null check.
379 llvm::Value *Null = llvm::Constant::getAllOnesValue(Src->getType());
380 llvm::Value *IsNull = Builder.CreateICmpEQ(Src, Null, "memptr.isnull");
381 return Builder.CreateSelect(IsNull, Src, Dst);
382 }
383
384 // The this-adjustment is left-shifted by 1 on ARM.
385 if (IsARM) {
386 uint64_t Offset = cast<llvm::ConstantInt>(Adj)->getZExtValue();
387 Offset <<= 1;
388 Adj = llvm::ConstantInt::get(Adj->getType(), Offset);
389 }
390
391 llvm::Value *SrcAdj = Builder.CreateExtractValue(Src, 1, "src.adj");
392 llvm::Value *DstAdj;
393 if (DerivedToBase)
394 DstAdj = Builder.CreateNSWSub(SrcAdj, Adj, "adj");
395 else
396 DstAdj = Builder.CreateNSWAdd(SrcAdj, Adj, "adj");
397
398 return Builder.CreateInsertValue(Src, DstAdj, 1);
399 }
400
401 llvm::Constant *
402 ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
403 const CastExpr *E) {
404 const MemberPointerType *SrcTy =
405 E->getSubExpr()->getType()->getAs<MemberPointerType>();
406 const MemberPointerType *DestTy =
407 E->getType()->getAs<MemberPointerType>();
408
409 bool DerivedToBase =
410 E->getCastKind() == CK_DerivedToBaseMemberPointer;
411
412 const CXXRecordDecl *DerivedDecl;
413 if (DerivedToBase)
414 DerivedDecl = SrcTy->getClass()->getAsCXXRecordDecl();
415 else
416 DerivedDecl = DestTy->getClass()->getAsCXXRecordDecl();
417
418 // Calculate the offset to the base class.
419 llvm::Constant *Offset =
420 CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
421 E->path_begin(),
422 E->path_end());
423 // If there's no offset, we're done.
424 if (!Offset) return C;
425
426 // If the source is a member data pointer, we have to do a null
427 // check and then add the offset. In the common case, we can fold
428 // away the offset.
429 if (SrcTy->isMemberDataPointer()) {
430 assert(C->getType() == getPtrDiffTy());
431
432 // If it's a constant int, just create a new constant int.
433 if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
434 int64_t Src = CI->getSExtValue();
435
436 // Null converts to null.
437 if (Src == -1) return CI;
438
439 // Otherwise, just add the offset.
440 int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
441 int64_t Dst = (DerivedToBase ? Src - OffsetV : Src + OffsetV);
442 return llvm::ConstantInt::get(CI->getType(), Dst, /*signed*/ true);
443 }
444
445 // Otherwise, we have to form a constant select expression.
446 llvm::Constant *Null = llvm::Constant::getAllOnesValue(C->getType());
447
448 llvm::Constant *IsNull =
449 llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, C, Null);
450
451 llvm::Constant *Dst;
452 if (DerivedToBase)
453 Dst = llvm::ConstantExpr::getNSWSub(C, Offset);
454 else
455 Dst = llvm::ConstantExpr::getNSWAdd(C, Offset);
456
457 return llvm::ConstantExpr::getSelect(IsNull, Null, Dst);
458 }
459
460 // The this-adjustment is left-shifted by 1 on ARM.
461 if (IsARM) {
462 int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
463 OffsetV <<= 1;
464 Offset = llvm::ConstantInt::get(Offset->getType(), OffsetV);
465 }
466
467 llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
468
469 llvm::Constant *Values[2] = { CS->getOperand(0), 0 };
470 if (DerivedToBase)
471 Values[1] = llvm::ConstantExpr::getSub(CS->getOperand(1), Offset);
472 else
473 Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
474
475 return llvm::ConstantStruct::get(CS->getType(), Values);
476 }
477
478
479 llvm::Constant *
480 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
481 llvm::Type *ptrdiff_t = getPtrDiffTy();
482
483 // Itanium C++ ABI 2.3:
484 // A NULL pointer is represented as -1.
485 if (MPT->isMemberDataPointer())
486 return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
487
488 llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
489 llvm::Constant *Values[2] = { Zero, Zero };
490 return llvm::ConstantStruct::getAnon(Values);
491 }
492
493 llvm::Constant *
494 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
495 CharUnits offset) {
496 // Itanium C++ ABI 2.3:
497 // A pointer to data member is an offset from the base address of
498 // the class object containing it, represented as a ptrdiff_t
499 return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
500 }
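// For instance (an illustrative layout, assuming a 4-byte int): under the
// rule above, the member data pointer '&S::y' for
// 'struct S { int x; int y; };' is just the ptrdiff_t constant 4, and the
// null member data pointer for the same type is -1.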
501
502 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
503 assert(MD->isInstance() && "Member function must not be static!");
504 MD = MD->getCanonicalDecl();
505
506 CodeGenTypes &Types = CGM.getTypes();
507 llvm::Type *ptrdiff_t = getPtrDiffTy();
508
509 // Get the function pointer (or index if this is a virtual function).
510 llvm::Constant *MemPtr[2];
511 if (MD->isVirtual()) {
512 uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
513
514 const ASTContext &Context = getContext();
515 CharUnits PointerWidth =
516 Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
517 uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
518
519 if (IsARM) {
520 // ARM C++ ABI 3.2.1:
521 // This ABI specifies that adj contains twice the this
522 // adjustment, plus 1 if the member function is virtual. The
523 // least significant bit of adj then makes exactly the same
524 // discrimination as the least significant bit of ptr does for
525 // Itanium.
526 MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
527 MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 1);
528 } else {
529 // Itanium C++ ABI 2.3:
530 // For a virtual function, [the pointer field] is 1 plus the
531 // virtual table offset (in bytes) of the function,
532 // represented as a ptrdiff_t.
533 MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
534 MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
535 }
536 } else {
537 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
538 llvm::Type *Ty;
539 // Check whether the function has a computable LLVM signature.
540 if (Types.isFuncTypeConvertible(FPT)) {
541 // The function has a computable LLVM signature; use the correct type.
542 Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
543 FPT->isVariadic());
544 } else {
545 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
546 // function type is incomplete.
547 Ty = ptrdiff_t;
548 }
549 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
550
551 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
552 MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
553 }
554
555 return llvm::ConstantStruct::getAnon(MemPtr);
556 }
557
558 /// The comparison algorithm is pretty easy: the member pointers are
559 /// the same if they're either bitwise identical *or* both null.
560 ///
561 /// ARM is different here only because null-ness is more complicated.
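///
/// The (L.ptr == 0) escape hatch matters because conversions adjust 'adj'
/// even for null method pointers: e.g. converting a null Itanium pointer
/// {0, 0} to a derived class whose base lies at (hypothetical) offset 8
/// yields {0, 8}, which must still compare equal to {0, 0}.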
562 llvm::Value *
563 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
564 llvm::Value *L,
565 llvm::Value *R,
566 const MemberPointerType *MPT,
567 bool Inequality) {
568 CGBuilderTy &Builder = CGF.Builder;
569
570 llvm::ICmpInst::Predicate Eq;
571 llvm::Instruction::BinaryOps And, Or;
572 if (Inequality) {
573 Eq = llvm::ICmpInst::ICMP_NE;
574 And = llvm::Instruction::Or;
575 Or = llvm::Instruction::And;
576 } else {
577 Eq = llvm::ICmpInst::ICMP_EQ;
578 And = llvm::Instruction::And;
579 Or = llvm::Instruction::Or;
580 }
581
582 // Member data pointers are easy because there's a unique null
583 // value, so it just comes down to bitwise equality.
584 if (MPT->isMemberDataPointer())
585 return Builder.CreateICmp(Eq, L, R);
586
587 // For member function pointers, the tautologies are more complex.
588 // The Itanium tautology is:
589 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
590 // The ARM tautology is:
591 // (L == R) <==> (L.ptr == R.ptr &&
592 // (L.adj == R.adj ||
593 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
594 // The inequality tautologies have exactly the same structure, except
595 // applying De Morgan's laws.
596
597 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
598 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
599
600 // This condition tests whether L.ptr == R.ptr. This must always be
601 // true for equality to hold.
602 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
603
604 // This condition, together with the assumption that L.ptr == R.ptr,
605 // tests whether the pointers are both null. ARM imposes an extra
606 // condition.
607 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
608 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
609
610 // This condition tests whether L.adj == R.adj. If this isn't
611 // true, the pointers are unequal unless they're both null.
612 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
613 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
614 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
615
616 // Null member function pointers on ARM clear the low bit of Adj,
617 // so the zero condition has to check that neither low bit is set.
618 if (IsARM) {
619 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
620
621 // Compute (l.adj | r.adj) & 1 and test it against zero.
622 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
623 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
624 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
625 "cmp.or.adj");
626 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
627 }
628
629 // Tie together all our conditions.
630 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
631 Result = Builder.CreateBinOp(And, PtrEq, Result,
632 Inequality ? "memptr.ne" : "memptr.eq");
633 return Result;
634 }
635
636 llvm::Value *
637 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
638 llvm::Value *MemPtr,
639 const MemberPointerType *MPT) {
640 CGBuilderTy &Builder = CGF.Builder;
641
642 /// For member data pointers, this is just a check against -1.
643 if (MPT->isMemberDataPointer()) {
644 assert(MemPtr->getType() == getPtrDiffTy());
645 llvm::Value *NegativeOne =
646 llvm::Constant::getAllOnesValue(MemPtr->getType());
647 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
648 }
649
650 // In Itanium, a member function pointer is not null if 'ptr' is not null.
651 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
652
653 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
654 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
655
656 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
657 // (the virtual bit) is set.
658 if (IsARM) {
659 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
660 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
661 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
662 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
663 "memptr.isvirtual");
664 Result = Builder.CreateOr(Result, IsVirtual);
665 }
666
667 return Result;
668 }
669
670 /// The Itanium ABI requires non-zero initialization only for data
671 /// member pointers, for which '0' is a valid offset.
672 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
673 return MPT->getPointeeType()->isFunctionType();
674 }
675
676 /// The generic ABI passes 'this', plus a VTT if it's initializing a
677 /// base subobject.
678 void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
679 CXXCtorType Type,
680 CanQualType &ResTy,
681 llvm::SmallVectorImpl<CanQualType> &ArgTys) {
682 ASTContext &Context = getContext();
683
684 // 'this' is already there.
685
686 // Check if we need to add a VTT parameter (which has type void **).
687 if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
688 ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
689 }
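// Illustrative result of the rule above (type names are hypothetical): for
// 'struct B : virtual V { B(); };', the complete-object constructor keeps
// the signature 'void(B *this)', while the base-object constructor becomes
// 'void(B *this, void **vtt)'.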
690
691 /// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
692 void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
693 CXXCtorType Type,
694 CanQualType &ResTy,
695 llvm::SmallVectorImpl<CanQualType> &ArgTys) {
696 ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
697 ResTy = ArgTys[0];
698 }
699
700 /// The generic ABI passes 'this', plus a VTT if it's destroying a
701 /// base subobject.
702 void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
703 CXXDtorType Type,
704 CanQualType &ResTy,
705 llvm::SmallVectorImpl<CanQualType> &ArgTys) {
706 ASTContext &Context = getContext();
707
708 // 'this' is already there.
709
710 // Check if we need to add a VTT parameter (which has type void **).
711 if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
712 ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
713 }
714
715 /// The ARM ABI does the same as the Itanium ABI, but returns 'this'
716 /// for non-deleting destructors.
717 void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
718 CXXDtorType Type,
719 CanQualType &ResTy,
720 llvm::SmallVectorImpl<CanQualType> &ArgTys) {
721 ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
722
723 if (Type != Dtor_Deleting)
724 ResTy = ArgTys[0];
725 }
726
727 void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
728 QualType &ResTy,
729 FunctionArgList &Params) {
730 /// Create the 'this' variable.
731 BuildThisParam(CGF, Params);
732
733 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
734 assert(MD->isInstance());
735
736 // Check if we need a VTT parameter as well.
737 if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
738 ASTContext &Context = getContext();
739
740 // FIXME: avoid the fake decl
741 QualType T = Context.getPointerType(Context.VoidPtrTy);
742 ImplicitParamDecl *VTTDecl
743 = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
744 &Context.Idents.get("vtt"), T);
745 Params.push_back(VTTDecl);
746 getVTTDecl(CGF) = VTTDecl;
747 }
748 }
749
750 void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
751 QualType &ResTy,
752 FunctionArgList &Params) {
753 ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);
754
755 // Return 'this' from certain constructors and destructors.
756 if (HasThisReturn(CGF.CurGD))
757 ResTy = Params[0]->getType();
758 }
759
760 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
761 /// Initialize the 'this' slot.
762 EmitThisParam(CGF);
763
764 /// Initialize the 'vtt' slot if needed.
765 if (getVTTDecl(CGF)) {
766 getVTTValue(CGF)
767 = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
768 "vtt");
769 }
770 }
771
772 void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
773 ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);
774
775 /// Initialize the return slot to 'this' at the start of the
776 /// function.
777 if (HasThisReturn(CGF.CurGD))
778 CGF.Builder.CreateStore(CGF.LoadCXXThis(), CGF.ReturnValue);
779 }
780
781 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
782 RValue RV, QualType ResultType) {
783 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
784 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
785
786 // Destructor thunks in the ARM ABI have indeterminate results.
787 llvm::Type *T =
788 cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
789 RValue Undef = RValue::get(llvm::UndefValue::get(T));
790 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
791 }
792
793 /************************** Array allocation cookies **************************/
794
795 bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
796 // If the class's usual deallocation function takes two arguments,
797 // it needs a cookie.
798 if (expr->doesUsualArrayDeleteWantSize())
799 return true;
800
801 // Automatic Reference Counting:
802 // We need an array cookie for pointers with strong or weak lifetime.
803 QualType AllocatedType = expr->getAllocatedType();
804 if (getContext().getLangOptions().ObjCAutoRefCount &&
805 AllocatedType->isObjCLifetimeType()) {
806 switch (AllocatedType.getObjCLifetime()) {
807 case Qualifiers::OCL_None:
808 case Qualifiers::OCL_ExplicitNone:
809 case Qualifiers::OCL_Autoreleasing:
810 return false;
811
812 case Qualifiers::OCL_Strong:
813 case Qualifiers::OCL_Weak:
814 return true;
815 }
816 }
817
818 // Otherwise, if the class has a non-trivial destructor, it always
819 // needs a cookie.
820 const CXXRecordDecl *record =
821 AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
822 return (record && !record->hasTrivialDestructor());
823 }
824
825 bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
826 QualType elementType) {
827 // If the class's usual deallocation function takes two arguments,
828 // it needs a cookie.
829 if (expr->doesUsualArrayDeleteWantSize())
830 return true;
831
832 // Automatic Reference Counting:
833 // We need an array cookie for pointers with strong or weak lifetime.
834 if (getContext().getLangOptions().ObjCAutoRefCount &&
835 elementType->isObjCLifetimeType()) {
836 switch (elementType.getObjCLifetime()) {
837 case Qualifiers::OCL_None:
838 case Qualifiers::OCL_ExplicitNone:
839 case Qualifiers::OCL_Autoreleasing:
840 return false;
841
842 case Qualifiers::OCL_Strong:
843 case Qualifiers::OCL_Weak:
844 return true;
845 }
846 }
847
848 // Otherwise, if the class has a non-trivial destructor, it always
849 // needs a cookie.
850 const CXXRecordDecl *record =
851 elementType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
852 return (record && !record->hasTrivialDestructor());
853 }
854
855 CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
856 if (!NeedsArrayCookie(expr))
857 return CharUnits::Zero();
858
859 // Padding is the maximum of sizeof(size_t) and alignof(elementType)
860 ASTContext &Ctx = getContext();
861 return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
862 Ctx.getTypeAlignInChars(expr->getAllocatedType()));
863 }
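// Worked example of the computation above (assuming a 64-bit target): for
// 'new X[n]' where X has a non-trivial destructor and 16-byte alignment,
// the cookie is max(8, 16) = 16 bytes; InitializeArrayCookie below stores
// 'n' in the last 8 bytes (at offset 16 - 8 = 8) and returns NewPtr + 16.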
864
865 llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
866 llvm::Value *NewPtr,
867 llvm::Value *NumElements,
868 const CXXNewExpr *expr,
869 QualType ElementType) {
870 assert(NeedsArrayCookie(expr));
871
872 unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
873
874 ASTContext &Ctx = getContext();
875 QualType SizeTy = Ctx.getSizeType();
876 CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
877
878 // The size of the cookie.
879 CharUnits CookieSize =
880 std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
881
882 // Compute an offset to the cookie.
883 llvm::Value *CookiePtr = NewPtr;
884 CharUnits CookieOffset = CookieSize - SizeSize;
885 if (!CookieOffset.isZero())
886 CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
887 CookieOffset.getQuantity());
888
889 // Write the number of elements into the appropriate slot.
890 llvm::Value *NumElementsPtr
891 = CGF.Builder.CreateBitCast(CookiePtr,
892 CGF.ConvertType(SizeTy)->getPointerTo(AS));
893 CGF.Builder.CreateStore(NumElements, NumElementsPtr);
894
895 // Finally, compute a pointer to the actual data buffer by skipping
896 // over the cookie completely.
897 return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
898 CookieSize.getQuantity());
899 }
900
901 void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
902 llvm::Value *Ptr,
903 const CXXDeleteExpr *expr,
904 QualType ElementType,
905 llvm::Value *&NumElements,
906 llvm::Value *&AllocPtr,
907 CharUnits &CookieSize) {
908 // Derive a char* in the same address space as the pointer.
909 unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
910 llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
911
912 // If we don't need an array cookie, bail out early.
913 if (!NeedsArrayCookie(expr, ElementType)) {
914 AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
915 NumElements = 0;
916 CookieSize = CharUnits::Zero();
917 return;
918 }
919
920 QualType SizeTy = getContext().getSizeType();
921 CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
922 llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
923
924 CookieSize
925 = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
926
927 CharUnits NumElementsOffset = CookieSize - SizeSize;
928
929 // Compute the allocated pointer.
930 AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
931 AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
932 -CookieSize.getQuantity());
933
934 llvm::Value *NumElementsPtr = AllocPtr;
935 if (!NumElementsOffset.isZero())
936 NumElementsPtr =
937 CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
938 NumElementsOffset.getQuantity());
939 NumElementsPtr =
940 CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
941 NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
942 }
943
944 CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
945 if (!NeedsArrayCookie(expr))
946 return CharUnits::Zero();
947
948 // On ARM, the cookie is always:
949 // struct array_cookie {
950 // std::size_t element_size; // element_size != 0
951 // std::size_t element_count;
952 // };
953 // TODO: what should we do if the allocated type actually wants
954 // greater alignment?
955 return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
956 }
957
958 llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
959 llvm::Value *NewPtr,
960 llvm::Value *NumElements,
961 const CXXNewExpr *expr,
962 QualType ElementType) {
963 assert(NeedsArrayCookie(expr));
964
965 // NewPtr is a char*.
966
967 unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
968
969 ASTContext &Ctx = getContext();
970 CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
971 llvm::IntegerType *SizeTy =
972 cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
973
974 // The cookie is always at the start of the buffer.
975 llvm::Value *CookiePtr = NewPtr;
976
977 // The first element is the element size.
978 CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
979 llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
980 Ctx.getTypeSizeInChars(ElementType).getQuantity());
981 CGF.Builder.CreateStore(ElementSize, CookiePtr);
982
983 // The second element is the element count.
984 CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
985 CGF.Builder.CreateStore(NumElements, CookiePtr);
986
987 // Finally, compute a pointer to the actual data buffer by skipping
988 // over the cookie completely.
989 CharUnits CookieSize = 2 * SizeSize;
990 return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
991 CookieSize.getQuantity());
992 }
993
994 void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
995 llvm::Value *Ptr,
996 const CXXDeleteExpr *expr,
997 QualType ElementType,
998 llvm::Value *&NumElements,
999 llvm::Value *&AllocPtr,
1000 CharUnits &CookieSize) {
1001 // Derive a char* in the same address space as the pointer.
1002 unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1003 llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
1004
1005 // If we don't need an array cookie, bail out early.
1006 if (!NeedsArrayCookie(expr, ElementType)) {
1007 AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
1008 NumElements = 0;
1009 CookieSize = CharUnits::Zero();
1010 return;
1011 }
1012
1013 QualType SizeTy = getContext().getSizeType();
1014 CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
1015 llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
1016
1017 // The cookie size is always 2 * sizeof(size_t).
1018 CookieSize = 2 * SizeSize;
1019
1020 // The allocated pointer is the input ptr, minus that amount.
1021 AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
1022 AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
1023 -CookieSize.getQuantity());
1024
1025 // The number of elements is at offset sizeof(size_t) relative to that.
1026 llvm::Value *NumElementsPtr
1027 = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
1028 SizeSize.getQuantity());
1029 NumElementsPtr =
1030 CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
1031 NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
1032 }
1033
1034 /*********************** Static local initialization **************************/
1035
1036 static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
1037 llvm::PointerType *GuardPtrTy) {
1038 // int __cxa_guard_acquire(__guard *guard_object);
1039 llvm::Type *ArgTys[] = { GuardPtrTy };
1040 llvm::FunctionType *FTy =
1041 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
1042 ArgTys, /*isVarArg=*/false);
1043
1044 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
1045 }
1046
1047 static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
1048 llvm::PointerType *GuardPtrTy) {
1049 // void __cxa_guard_release(__guard *guard_object);
1050 llvm::Type *ArgTys[] = { GuardPtrTy };
1051 llvm::FunctionType *FTy =
1052 llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
1053 ArgTys, /*isVarArg=*/false);
1054
1055 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
1056 }
1057
1058 static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
1059 llvm::PointerType *GuardPtrTy) {
1060 // void __cxa_guard_abort(__guard *guard_object);
1061 llvm::Type *ArgTys[] = { GuardPtrTy };
1062 llvm::FunctionType *FTy =
1063 llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
1064 ArgTys, /*isVarArg=*/false);
1065
1066 return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
1067 }
1068
1069 namespace {
1070 struct CallGuardAbort : EHScopeStack::Cleanup {
1071 llvm::GlobalVariable *Guard;
1072 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
1073
1074 void Emit(CodeGenFunction &CGF, Flags flags) {
1075 CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
1076 ->setDoesNotThrow();
1077 }
1078 };
1079 }
1080
1081 /// The ARM code here follows the Itanium code closely enough that we
1082 /// just special-case it at particular places.
1083 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
1084 const VarDecl &D,
1085 llvm::GlobalVariable *GV) {
1086 CGBuilderTy &Builder = CGF.Builder;
1087
1088 // We only need to use thread-safe statics for local variables;
1089 // global initialization is always single-threaded.
1090 bool threadsafe =
1091 (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
1092
1093 llvm::IntegerType *GuardTy;
1094
1095 // If we have a global variable with internal linkage and thread-safe statics
1096 // are disabled, we can just let the guard variable be of type i8.
1097 bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
1098 if (useInt8GuardVariable) {
1099 GuardTy = CGF.Int8Ty;
1100 } else {
1101 // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
1102 GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
1103 }
1104 llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
1105
1106 // Create the guard variable.
1107 llvm::SmallString<256> GuardVName;
1108 llvm::raw_svector_ostream Out(GuardVName);
1109 getMangleContext().mangleItaniumGuardVariable(&D, Out);
1110 Out.flush();
1111
1112 // Just absorb linkage and visibility from the variable.
1113 llvm::GlobalVariable *GuardVariable =
1114 new llvm::GlobalVariable(CGM.getModule(), GuardTy,
1115 false, GV->getLinkage(),
1116 llvm::ConstantInt::get(GuardTy, 0),
1117 GuardVName.str());
1118 GuardVariable->setVisibility(GV->getVisibility());
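// For example (a sketch using standard Itanium mangling): a function-local
// 'static Foo s;' inside 'void f()' gets the guard '_ZGVZ1fvE1s' alongside
// the variable '_ZZ1fvE1s'; the guard is an i32 on ARM, an i64 elsewhere,
// or an i8 when thread-safety isn't needed and the variable is internal.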
1119
1120 // Test whether the variable has completed initialization.
1121 llvm::Value *IsInitialized;
1122
1123 // ARM C++ ABI 3.2.3.1:
1124 // To support the potential use of initialization guard variables
1125 // as semaphores that are the target of ARM SWP and LDREX/STREX
1126 // synchronizing instructions we define a static initialization
1127 // guard variable to be a 4-byte aligned, 4-byte word with the
1128 // following inline access protocol.
1129 // #define INITIALIZED 1
1130 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
1131 // if (__cxa_guard_acquire(&obj_guard))
1132 // ...
1133 // }
1134 if (IsARM && !useInt8GuardVariable) {
1135 llvm::Value *V = Builder.CreateLoad(GuardVariable);
1136 V = Builder.CreateAnd(V, Builder.getInt32(1));
1137 IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
1138
1139 // Itanium C++ ABI 3.3.2:
1140 // The following is pseudo-code showing how these functions can be used:
1141 // if (obj_guard.first_byte == 0) {
1142 // if ( __cxa_guard_acquire (&obj_guard) ) {
1143 // try {
1144 // ... initialize the object ...;
1145 // } catch (...) {
1146 // __cxa_guard_abort (&obj_guard);
1147 // throw;
1148 // }
1149 // ... queue object destructor with __cxa_atexit() ...;
1150 // __cxa_guard_release (&obj_guard);
1151 // }
1152 // }
1153 } else {
1154 // Load the first byte of the guard variable.
1155 llvm::Type *PtrTy = Builder.getInt8PtrTy();
1156 llvm::Value *V =
1157 Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
1158
1159 IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
1160 }
1161
1162 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
1163 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
1164
1165 llvm::BasicBlock *NoCheckBlock = EndBlock;
1166 if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");
1167
1168 // Check if the first byte of the guard variable is zero.
1169 Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
1170
1171 CGF.EmitBlock(InitCheckBlock);
1172
1173 // Variables used when coping with thread-safe statics and exceptions.
1174 if (threadsafe) {
1175 // Call __cxa_guard_acquire.
1176 llvm::Value *V
1177 = Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
1178
1179 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
1180
1181 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
1182 InitBlock, EndBlock);
1183
1184 // Call __cxa_guard_abort along the exceptional edge.
1185 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
1186
1187 CGF.EmitBlock(InitBlock);
1188 }
1189
1190 // Emit the initializer and add a global destructor if appropriate.
1191 CGF.EmitCXXGlobalVarDeclInit(D, GV);
1192
1193 if (threadsafe) {
1194 // Pop the guard-abort cleanup if we pushed one.
1195 CGF.PopCleanupBlock();
1196
1197 // Call __cxa_guard_release. This cannot throw.
1198 Builder.CreateCall(getGuardReleaseFn(CGM, GuardPtrTy), GuardVariable);
1199 } else {
1200 Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
1201 }
1202
1203 // Emit an acquire memory barrier if using thread-safe statics:
1204 // Itanium ABI:
1205 // An implementation supporting thread-safety on multiprocessor
1206 // systems must also guarantee that references to the initialized
1207 // object do not occur before the load of the initialization flag.
1208 if (threadsafe) {
1209 Builder.CreateBr(EndBlock);
1210 CGF.EmitBlock(NoCheckBlock);
1211
1212 llvm::Value *_false = Builder.getFalse();
1213 llvm::Value *_true = Builder.getTrue();
1214
1215 Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
1216 /* load-load, load-store */ _true, _true,
1217 /* store-load, store-store */ _false, _false,
1218 /* device or I/O */ _false);
1219 }
1220
1221 CGF.EmitBlock(EndBlock);
1222 }
1223