//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of virtual tables.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
  : CGM(CGM), VTContext(CGM.getContext()) {
  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
    // FIXME: Eventually, we should only have one of V*TContexts available.
    // Today we use both in the Microsoft ABI as MicrosoftVFTableContext
    // is not completely supported in CodeGen yet.
    VFTContext.reset(new MicrosoftVFTableContext(CGM.getContext()));
  }
}

llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
                                              const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Compute the mangled name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
    getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
                                                      Thunk.This, Out);
  else
    getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
  Out.flush();

  llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}

static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
                                          llvm::Value *Ptr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return Ptr;

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);

  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    // Perform the non-virtual adjustment for a base-to-derived cast.
    V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
  }

  if (VirtualAdjustment) {
    llvm::Type *PtrDiffTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Perform the virtual adjustment.
    llvm::Value *VTablePtrPtr =
      CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());

    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *OffsetPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);

    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

    // Load the adjustment offset from the vtable.
    llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);

    // Adjust our pointer.
    V = CGF.Builder.CreateInBoundsGEP(V, Offset);
  }

  if (NonVirtualAdjustment && IsReturnAdjustment) {
    // Perform the non-virtual adjustment for a derived-to-base cast.
    V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(V, Ptr->getType());
}
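
// Illustrative sketch (assuming the Itanium C++ ABI, with hypothetical
// classes): a 'this' adjustment is needed when a virtual function is
// overridden for a non-primary base. For example, given
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { virtual void g(); };  // B lives at a non-zero offset
//
// the B-in-C vtable slot for g points at a thunk that applies the constant
// NonVirtualAdjustment above (subtracting the offset of the B subobject)
// before entering C::g. If B were a virtual base, the extra offset would
// instead be loaded from the vtable via the VirtualAdjustment path above.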

static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                               const ThunkInfo &Thunk, llvm::Function *Fn) {
  CGM.setGlobalVisibility(Fn, MD);

  if (!CGM.getCodeGenOpts().HiddenWeakVTables)
    return;

  // If the thunk has weak/linkonce linkage, but the function must be
  // emitted in every translation unit that references it, then we can
  // emit its thunks with hidden visibility, since its thunks must be
  // emitted when the function is.

  // This follows CodeGenModule::setTypeVisibility; see the comments
  // there for explanation.

  if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
       Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
      Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
    return;

  if (MD->getExplicitVisibility(ValueDecl::VisibilityForValue))
    return;

  switch (MD->getTemplateSpecializationKind()) {
  case TSK_ExplicitInstantiationDefinition:
  case TSK_ExplicitInstantiationDeclaration:
    return;

  case TSK_Undeclared:
    break;

  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
    return;
    break;
  }

  // If there's an explicit definition, and that definition is
  // out-of-line, then we can't assume that all users will have a
  // definition to emit.
  const FunctionDecl *Def = 0;
  if (MD->hasBody(Def) && Def->isOutOfLine())
    return;

  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
}

#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
                    const ABIArgInfo &infoR, CanQualType typeR) {
  return (infoL.getKind() == infoR.getKind() &&
          (typeL == typeR ||
           (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
           (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
}
#endif

static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = 0;
  llvm::BasicBlock *AdjustNotNull = 0;
  llvm::BasicBlock *AdjustEnd = 0;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
                                      Thunk.Return.NonVirtual,
                                      Thunk.Return.VBaseOffsetOffset,
                                      /*IsReturnAdjustment*/true);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}
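
// Illustrative sketch (assuming the Itanium C++ ABI, with hypothetical
// classes): return adjustments arise from covariant return types. Given
//
//   struct X { virtual X *clone(); };
//   struct Pad { int n; };
//   struct Y : Pad, X { virtual Y *clone(); };  // X is at a non-zero offset
//
// a caller that reaches Y::clone through an X* expects an X* result, so the
// thunk converts the returned Y* to X* (a derived-to-base adjustment). The
// null check above exists because a null return value must stay null rather
// than being offset into garbage.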

// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition. There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
void CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
                                           const CGFunctionInfo &FnInfo,
                                           GlobalDecl GD,
                                           const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getResultType();

  // Get the original function
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
                                              /*ModuleLevelChanges=*/false);
  CGM.getModule().getFunctionList().push_back(NewFn);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  llvm::Value *ThisPtr = &*AI;
  llvm::BasicBlock *EntryBB = Fn->begin();
  llvm::Instruction *ThisStore = 0;
  for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
       I != E; I++) {
    if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
      ThisStore = cast<llvm::StoreInst>(I);
      break;
    }
  }
  assert(ThisStore && "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(ThisStore);
  llvm::Value *AdjustedThisPtr =
    PerformTypeAdjustment(*this, ThisPtr,
                          Thunk.This.NonVirtual,
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
      llvm::Instruction *T = I->getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&*I);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }
}

void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ThisType = MD->getThisType(getContext());
  QualType ResultType =
    CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getResultType();

  FunctionArgList FunctionArgs;

  // FIXME: It would be nice if more of this code could be shared with
  // CodeGenFunction::GenerateCode.

  // Create the implicit 'this' parameter declaration.
  CurGD = GD;
  CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *Param = *I;

    FunctionArgs.push_back(Param);
  }

  // Initialize debug info if needed.
  maybeInitializeDebugInfo();

  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                SourceLocation());

  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
    PerformTypeAdjustment(*this, LoadCXXThis(),
                          Thunk.This.NonVirtual,
                          Thunk.This.VCallOffsetOffset,
                          /*IsReturnAdjustment*/false);

  CallArgList CallArgs;

  // Add our adjusted 'this' pointer.
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  // Get our callee.
  llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                        RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
         CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
         CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 FnInfo.getReturnInfo(), FnInfo.getReturnType()));
  assert(CallFnInfo.arg_size() == FnInfo.arg_size());
  for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);

  if (!Thunk.Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);

  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();

  // Set the right linkage.
  CGM.setFunctionLinkage(GD, Fn);

  // Set the right visibility.
  setThunkVisibility(CGM, MD, Thunk, Fn);
}

void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool UseAvailableExternallyLinkage) {
  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
    // Emission of thunks is not supported yet in Microsoft ABI.
    return;
  }

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = CE->getOperand(0);
  }

  // There's already a declaration with the same name; check if it has the
  // same type or if we need to replace it.
  if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);

    // If the types mismatch then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = CGM.GetAddrOfThunk(GD, Thunk);

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);

  if (!ThunkFn->isDeclaration()) {
    if (UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function, do nothing.
      return;
    }

    // If a function has a body, it should have available_externally linkage.
    assert(ThunkFn->hasAvailableExternallyLinkage() &&
           "Function should have available_externally linkage!");

    // Change the linkage.
    CGM.setFunctionLinkage(GD, ThunkFn);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs. Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (!UseAvailableExternallyLinkage)
      CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
  }

  if (UseAvailableExternallyLinkage)
    ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
}

void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
                                                       const ThunkInfo &Thunk) {
  // We only want to do this when building with optimizations.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // We can't emit thunks for member functions with incomplete types.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  if (!CGM.getTypes().isFuncTypeConvertible(
                               cast<FunctionType>(MD->getType().getTypePtr())))
    return;

  EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
}

void CodeGenVTables::EmitThunks(GlobalDecl GD) {
  const CXXMethodDecl *MD =
    cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // We don't need to generate thunks for the base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return;

  if (VFTContext.isValid()) {
    // FIXME: This is a temporary solution to force generation of vftables in
    // Microsoft ABI. Remove when we thread VFTableContext through CodeGen.
    VFTContext->getVFPtrOffsets(MD->getParent());
  }

  const VTableContext::ThunkInfoVectorTy *ThunkInfoVector =
    VTContext.getThunkInfo(MD);
  if (!ThunkInfoVector)
    return;

  for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
    EmitThunk(GD, (*ThunkInfoVector)[I],
              /*UseAvailableExternallyLinkage=*/false);
}

llvm::Constant *
CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
                                        const VTableComponent *Components,
                                        unsigned NumComponents,
                               const VTableLayout::VTableThunkTy *VTableThunks,
                                        unsigned NumVTableThunks) {
  SmallVector<llvm::Constant *, 64> Inits;

  llvm::Type *Int8PtrTy = CGM.Int8PtrTy;

  llvm::Type *PtrDiffTy =
    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  QualType ClassType = CGM.getContext().getTagDeclType(RD);
  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);

  unsigned NextVTableThunkIndex = 0;

  llvm::Constant *PureVirtualFn = 0, *DeletedVirtualFn = 0;

  for (unsigned I = 0; I != NumComponents; ++I) {
    VTableComponent Component = Components[I];

    llvm::Constant *Init = 0;

    switch (Component.getKind()) {
    case VTableComponent::CK_VCallOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVCallOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_VBaseOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVBaseOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_OffsetToTop:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getOffsetToTop().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_RTTI:
      Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
      break;
    case VTableComponent::CK_FunctionPointer:
    case VTableComponent::CK_CompleteDtorPointer:
    case VTableComponent::CK_DeletingDtorPointer: {
      GlobalDecl GD;

      // Get the right global decl.
      switch (Component.getKind()) {
      default:
        llvm_unreachable("Unexpected vtable component kind");
      case VTableComponent::CK_FunctionPointer:
        GD = Component.getFunctionDecl();
        break;
      case VTableComponent::CK_CompleteDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
        break;
      case VTableComponent::CK_DeletingDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
        break;
      }

      if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
        // We have a pure virtual member function.
        if (!PureVirtualFn) {
          llvm::FunctionType *Ty =
            llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
          PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
          PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
                                                         CGM.Int8PtrTy);
        }
        Init = PureVirtualFn;
      } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
        if (!DeletedVirtualFn) {
          llvm::FunctionType *Ty =
            llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef DeletedCallName =
            CGM.getCXXABI().GetDeletedVirtualCallName();
          DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
          DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
                                                            CGM.Int8PtrTy);
        }
        Init = DeletedVirtualFn;
      } else {
        // Check if we should use a thunk.
        if (NextVTableThunkIndex < NumVTableThunks &&
            VTableThunks[NextVTableThunkIndex].first == I) {
          const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;

          MaybeEmitThunkAvailableExternally(GD, Thunk);
          Init = CGM.GetAddrOfThunk(GD, Thunk);

          NextVTableThunkIndex++;
        } else {
          llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);

          Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
        }

        Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
      }
      break;
    }

    case VTableComponent::CK_UnusedFunctionPointer:
      Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
      break;
    };

    Inits.push_back(Init);
  }

  llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
  return llvm::ConstantArray::get(ArrayType, Inits);
}
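
// Illustrative note (assuming the Itanium C++ ABI): the initializer built
// above is a flat array of i8* spanning the whole vtable group. Each vtable
// in the group has the shape
//
//   [vcall offsets] [vbase offsets] [offset-to-top] [RTTI] [vfn pointers...]
//
// where the offset entries are ptrdiff_t values cast to i8* (the inttoptr
// constants above), the RTTI slot points at the class's type information,
// and the address point stored in object vptrs is the slot just past the
// RTTI entry. The vcall/vbase offset entries are only present when virtual
// bases are involved.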

llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this v-table for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
  Out.flush();
  StringRef Name = OutName.str();

  llvm::ArrayType *ArrayType =
    llvm::ArrayType::get(CGM.Int8PtrTy,
                        VTContext.getVTableLayout(RD).getNumVTableComponents());

  VTable =
    CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
                                          llvm::GlobalValue::ExternalLinkage);
  VTable->setUnnamedAddr(true);
  return VTable;
}

void
CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
                                     llvm::GlobalVariable::LinkageTypes Linkage,
                                     const CXXRecordDecl *RD) {
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);

  // Create and set the initializer.
  llvm::Constant *Init =
    CreateVTableInitializer(RD,
                            VTLayout.vtable_component_begin(),
                            VTLayout.getNumVTableComponents(),
                            VTLayout.vtable_thunk_begin(),
                            VTLayout.getNumVTableThunks());
  VTable->setInitializer(Init);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Set the right visibility.
  CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
}

llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
                                      const BaseSubobject &Base,
                                      bool BaseIsVirtual,
                                      llvm::GlobalVariable::LinkageTypes Linkage,
                                      VTableAddressPointsMapTy& AddressPoints) {
  OwningPtr<VTableLayout> VTLayout(
    VTContext.createConstructionVTableLayout(Base.getBase(),
                                             Base.getBaseOffset(),
                                             BaseIsVirtual, RD));

  // Add the address points.
  AddressPoints = VTLayout->getAddressPoints();

  // Get the mangled construction vtable name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  CGM.getCXXABI().getMangleContext().
    mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
                        Out);
  Out.flush();
  StringRef Name = OutName.str();

  llvm::ArrayType *ArrayType =
    llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());

  // Construction vtable symbols are not part of the Itanium ABI, so we cannot
  // guarantee that they actually will be available externally. Instead, when
  // emitting an available_externally VTT, we provide references to an internal
  // linkage construction vtable. The ABI only requires complete-object vtables
  // to be the same for all instances of a type, not construction vtables.
  if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
    Linkage = llvm::GlobalVariable::InternalLinkage;

  // Create the variable that will hold the construction vtable.
  llvm::GlobalVariable *VTable =
    CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
  CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForConstructionVTable);

  // V-tables are always unnamed_addr.
  VTable->setUnnamedAddr(true);

  // Create and set the initializer.
  llvm::Constant *Init =
    CreateVTableInitializer(Base.getBase(),
                            VTLayout->vtable_component_begin(),
                            VTLayout->getNumVTableComponents(),
                            VTLayout->vtable_thunk_begin(),
                            VTLayout->getNumVTableThunks());
  VTable->setInitializer(Init);

  return VTable;
}

/// Compute the required linkage of the v-table for the given class.
///
/// Note that we only call this at the end of the translation unit.
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
  if (!RD->isExternallyVisible())
    return llvm::GlobalVariable::InternalLinkage;

  // We're at the end of the translation unit, so the current key
  // function is fully correct.
  if (const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD)) {
    // If this class has a key function, use that to determine the
    // linkage of the vtable.
    const FunctionDecl *def = 0;
    if (keyFunction->hasBody(def))
      keyFunction = cast<CXXMethodDecl>(def);

    switch (keyFunction->getTemplateSpecializationKind()) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      // When compiling with optimizations turned on, we emit all vtables,
      // even if the key function is not defined in the current translation
      // unit. If this is the case, use available_externally linkage.
      if (!def && CodeGenOpts.OptimizationLevel)
        return llvm::GlobalVariable::AvailableExternallyLinkage;

      if (keyFunction->isInlined())
        return !Context.getLangOpts().AppleKext ?
                 llvm::GlobalVariable::LinkOnceODRLinkage :
                 llvm::Function::InternalLinkage;

      return llvm::GlobalVariable::ExternalLinkage;

    case TSK_ImplicitInstantiation:
      return !Context.getLangOpts().AppleKext ?
               llvm::GlobalVariable::LinkOnceODRLinkage :
               llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDefinition:
      return !Context.getLangOpts().AppleKext ?
               llvm::GlobalVariable::WeakODRLinkage :
               llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDeclaration:
      return !Context.getLangOpts().AppleKext ?
               llvm::GlobalVariable::AvailableExternallyLinkage :
               llvm::Function::InternalLinkage;
    }
  }

  // -fapple-kext mode does not support weak linkage, so we must use
  // internal linkage.
  if (Context.getLangOpts().AppleKext)
    return llvm::Function::InternalLinkage;

  switch (RD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
    return llvm::GlobalVariable::LinkOnceODRLinkage;

  case TSK_ExplicitInstantiationDeclaration:
    return llvm::GlobalVariable::AvailableExternallyLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return llvm::GlobalVariable::WeakODRLinkage;
  }

  llvm_unreachable("Invalid TemplateSpecializationKind!");
}
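
// Illustrative note: the "key function" used above is, roughly, the first
// non-pure virtual member function that is not inline at the point of class
// definition (per the Itanium C++ ABI). For a hypothetical
//
//   struct S {
//     virtual void f();      // key function
//     virtual void g() {}    // inline, so not the key function
//   };
//
// the translation unit that defines S::f emits the strong vtable definition;
// other translation units reference it externally (or emit it
// available_externally when optimizing, as handled above).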

/// This is a callback from Sema to tell us that it believes that a
/// particular v-table is required to be emitted in this translation
/// unit.
///
/// The reason we don't simply trust this callback is because Sema
/// will happily report that something is used even when it's used
/// only in code that we don't actually have to emit.
///
/// \param isRequired - if true, the v-table is mandatory, e.g.
/// because the translation unit defines the key function
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass, bool isRequired) {
  if (!isRequired) return;

  VTables.GenerateClassData(theClass);
}

void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  if (VFTContext.isValid()) {
    // FIXME: This is a temporary solution to force generation of vftables in
    // Microsoft ABI. Remove when we thread VFTableContext through CodeGen.
    VFTContext->getVFPtrOffsets(RD);
  }

  // First off, check whether we've already emitted the v-table and
  // associated stuff.
  llvm::GlobalVariable *VTable = GetAddrOfVTable(RD);
  if (VTable->hasInitializer())
    return;

  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  EmitVTableDefinition(VTable, Linkage, RD);

  if (RD->getNumVBases())
    CGM.getCXXABI().EmitVirtualInheritanceTables(Linkage, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) &&
      cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    CGM.EmitFundamentalRTTIDescriptors();
}

/// At this point in the translation unit, does it appear that we can
/// rely on the vtable being defined elsewhere in the program?
///
/// The response is really only definitive when called at the end of
/// the translation unit.
///
/// The only semantic restriction here is that the object file should
/// not contain a v-table definition when that v-table is defined
/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
/// v-tables when unnecessary.
bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");

  // If we have an explicit instantiation declaration (and not a
  // definition), the v-table is defined elsewhere.
  TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    return true;

  // Otherwise, if the class is an instantiated template, the
  // v-table must be defined here.
  if (TSK == TSK_ImplicitInstantiation ||
      TSK == TSK_ExplicitInstantiationDefinition)
    return false;

  // Otherwise, if the class doesn't have a key function (possibly
  // anymore), the v-table must be defined here.
  const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
  if (!keyFunction)
    return false;

  // Otherwise, if we don't have a definition of the key function, the
  // v-table must be defined somewhere else.
  return !keyFunction->hasBody();
}

/// Given that we're currently at the end of the translation unit, and
/// we've emitted a reference to the v-table for this class, should
/// we define that v-table?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  // If we're building with optimization, we always emit v-tables
  // since that allows for virtual function calls to be devirtualized.
  // If the v-table is defined strongly elsewhere, this definition
  // will be emitted available_externally.
  //
  // However, we don't want to do this in -fapple-kext mode, because
  // kext mode does not permit devirtualization.
  if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
    return true;

  return !CGM.getVTables().isVTableExternal(RD);
}

/// Given that at some point we emitted a reference to one or more
/// v-tables, and that we are now at the end of the translation unit,
/// decide whether we should emit them.
void CodeGenModule::EmitDeferredVTables() {
#ifndef NDEBUG
  // Remember the size of DeferredVTables, because we're going to assume
  // that this entire operation doesn't modify it.
  size_t savedSize = DeferredVTables.size();
#endif

  typedef std::vector<const CXXRecordDecl *>::const_iterator const_iterator;
  for (const_iterator i = DeferredVTables.begin(),
                      e = DeferredVTables.end(); i != e; ++i) {
    const CXXRecordDecl *RD = *i;
    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
      VTables.GenerateClassData(RD);
  }

  assert(savedSize == DeferredVTables.size() &&
         "deferred extra v-tables during v-table emission?");
  DeferredVTables.clear();
}