1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Constant Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CGRecordLayout.h"
19 #include "clang/AST/APValue.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/RecordLayout.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "clang/Basic/Builtins.h"
24 #include "llvm/Constants.h"
25 #include "llvm/Function.h"
26 #include "llvm/GlobalVariable.h"
27 #include "llvm/Target/TargetData.h"
28 using namespace clang;
29 using namespace CodeGen;
30
31 //===----------------------------------------------------------------------===//
32 // ConstStructBuilder
33 //===----------------------------------------------------------------------===//
34
35 namespace {
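// ConstStructBuilder incrementally builds the constant initializer for a
// struct or union: it appends one LLVM constant per field, inserts explicit
// undef padding wherever the AST record layout requires it, and converts the
// result to a packed LLVM struct if natural alignment would push a field past
// its required offset.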
36 class ConstStructBuilder {
37 CodeGenModule &CGM;
38 CodeGenFunction *CGF;
39
40 bool Packed;
41 CharUnits NextFieldOffsetInChars;
42 CharUnits LLVMStructAlignment;
43 std::vector<llvm::Constant *> Elements;
44 public:
45 static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
46 InitListExpr *ILE);
47
48 private:
49 ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
50 : CGM(CGM), CGF(CGF), Packed(false),
51 NextFieldOffsetInChars(CharUnits::Zero()),
52 LLVMStructAlignment(CharUnits::One()) { }
53
54 bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
55 llvm::Constant *InitExpr);
56
57 void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
58 llvm::ConstantInt *InitExpr);
59
60 void AppendPadding(CharUnits PadSize);
61
62 void AppendTailPadding(CharUnits RecordSize);
63
64 void ConvertStructToPacked();
65
66 bool Build(InitListExpr *ILE);
67
68 CharUnits getAlignment(const llvm::Constant *C) const {
69 if (Packed) return CharUnits::One();
70 return CharUnits::fromQuantity(
71 CGM.getTargetData().getABITypeAlignment(C->getType()));
72 }
73
74 CharUnits getSizeInChars(const llvm::Constant *C) const {
75 return CharUnits::fromQuantity(
76 CGM.getTargetData().getTypeAllocSize(C->getType()));
77 }
78 };
79
80 bool ConstStructBuilder::
81 AppendField(const FieldDecl *Field, uint64_t FieldOffset,
82 llvm::Constant *InitCst) {
83
84 const ASTContext &Context = CGM.getContext();
85
86 CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
87
88 assert(NextFieldOffsetInChars <= FieldOffsetInChars
89 && "Field offset mismatch!");
90
91 CharUnits FieldAlignment = getAlignment(InitCst);
92
93 // Round up the field offset to the alignment of the field type.
94 CharUnits AlignedNextFieldOffsetInChars =
95 NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
96
97 if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
98 assert(!Packed && "Alignment is wrong even with a packed struct!");
99
100 // Convert the struct to a packed struct.
101 ConvertStructToPacked();
102
103 AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
104 }
105
106 if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
107 // We need to append padding.
108 AppendPadding(
109 FieldOffsetInChars - NextFieldOffsetInChars);
110
111 assert(NextFieldOffsetInChars == FieldOffsetInChars &&
112 "Did not add enough padding!");
113
114 AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
115 }
116
117 // Add the field.
118 Elements.push_back(InitCst);
119 NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
120 getSizeInChars(InitCst);
121
122 if (Packed)
123 assert(LLVMStructAlignment == CharUnits::One() &&
124 "Packed struct not byte-aligned!");
125 else
126 LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
127
128 return true;
129 }
130
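// Append a bit-field value. Bit-fields are accumulated one char unit at a
// time: bits that share a byte with the previous element are OR'ed into it,
// full bytes are pushed as individual i8 constants, and the final partial
// byte is shifted into place according to the target's endianness.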
131 void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
132 uint64_t FieldOffset,
133 llvm::ConstantInt *CI) {
134 const ASTContext &Context = CGM.getContext();
135 const uint64_t CharWidth = Context.getCharWidth();
136 uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
137 if (FieldOffset > NextFieldOffsetInBits) {
138 // We need to add padding.
139 CharUnits PadSize = Context.toCharUnitsFromBits(
140 llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
141 Context.Target.getCharAlign()));
142
143 AppendPadding(PadSize);
144 }
145
146 uint64_t FieldSize =
147 Field->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
148
149 llvm::APInt FieldValue = CI->getValue();
150
151 // Promote the size of FieldValue if necessary
152 // FIXME: This should never occur, but currently it can because initializer
153 // constants are cast to bool, and because clang is not enforcing bitfield
154 // width limits.
155 if (FieldSize > FieldValue.getBitWidth())
156 FieldValue = FieldValue.zext(FieldSize);
157
158 // Truncate the size of FieldValue to the bit field size.
159 if (FieldSize < FieldValue.getBitWidth())
160 FieldValue = FieldValue.trunc(FieldSize);
161
162 NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
163 if (FieldOffset < NextFieldOffsetInBits) {
164 // Either part of the field or the entire field can go into the previous
165 // byte.
166 assert(!Elements.empty() && "Elements can't be empty!");
167
168 unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
169
170 bool FitsCompletelyInPreviousByte =
171 BitsInPreviousByte >= FieldValue.getBitWidth();
172
173 llvm::APInt Tmp = FieldValue;
174
175 if (!FitsCompletelyInPreviousByte) {
176 unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
177
178 if (CGM.getTargetData().isBigEndian()) {
179 Tmp = Tmp.lshr(NewFieldWidth);
180 Tmp = Tmp.trunc(BitsInPreviousByte);
181
182 // We want the remaining high bits.
183 FieldValue = FieldValue.trunc(NewFieldWidth);
184 } else {
185 Tmp = Tmp.trunc(BitsInPreviousByte);
186
187 // We want the remaining low bits.
188 FieldValue = FieldValue.lshr(BitsInPreviousByte);
189 FieldValue = FieldValue.trunc(NewFieldWidth);
190 }
191 }
192
193 Tmp = Tmp.zext(CharWidth);
194 if (CGM.getTargetData().isBigEndian()) {
195 if (FitsCompletelyInPreviousByte)
196 Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
197 } else {
198 Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
199 }
200
201 // 'or' in the bits that go into the previous byte.
202 llvm::Value *LastElt = Elements.back();
203 if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
204 Tmp |= Val->getValue();
205 else {
206 assert(isa<llvm::UndefValue>(LastElt));
207 // If there is an undef field that we're adding to, it can either be a
208 // scalar undef (in which case, we just replace it with our field) or it
209 // is an array. If it is an array, we have to pull one byte off the
210 // array so that the other undef bytes stay around.
211 if (!isa<llvm::IntegerType>(LastElt->getType())) {
212 // The undef padding will be a multibyte array; create a new smaller
213 // padding and then a hole for our i8 to get plopped into.
214 assert(isa<llvm::ArrayType>(LastElt->getType()) &&
215 "Expected array padding of undefs");
216 llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
217 assert(AT->getElementType()->isIntegerTy(CharWidth) &&
218 AT->getNumElements() != 0 &&
219 "Expected non-empty array padding of undefs");
220
221 // Remove the padding array.
222 NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
223 Elements.pop_back();
224
225 // Add the padding back in two chunks.
226 AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
227 AppendPadding(CharUnits::One());
228 assert(isa<llvm::UndefValue>(Elements.back()) &&
229 Elements.back()->getType()->isIntegerTy(CharWidth) &&
230 "Padding addition didn't work right");
231 }
232 }
233
234 Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
235
236 if (FitsCompletelyInPreviousByte)
237 return;
238 }
239
240 while (FieldValue.getBitWidth() > CharWidth) {
241 llvm::APInt Tmp;
242
243 if (CGM.getTargetData().isBigEndian()) {
244 // We want the high bits.
245 Tmp =
246 FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
247 } else {
248 // We want the low bits.
249 Tmp = FieldValue.trunc(CharWidth);
250
251 FieldValue = FieldValue.lshr(CharWidth);
252 }
253
254 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
255 ++NextFieldOffsetInChars;
256
257 FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
258 }
259
260 assert(FieldValue.getBitWidth() > 0 &&
261 "Should have at least one bit left!");
262 assert(FieldValue.getBitWidth() <= CharWidth &&
263 "Should not have more than a byte left!");
264
265 if (FieldValue.getBitWidth() < CharWidth) {
266 if (CGM.getTargetData().isBigEndian()) {
267 unsigned BitWidth = FieldValue.getBitWidth();
268
269 FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
270 } else
271 FieldValue = FieldValue.zext(CharWidth);
272 }
273
274 // Append the last element.
275 Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
276 FieldValue));
277 ++NextFieldOffsetInChars;
278 }
279
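// Padding is emitted as undef i8 (or an [N x i8] array) so that it never
// increases the struct's alignment.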
280 void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
281 if (PadSize.isZero())
282 return;
283
284 llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
285 if (PadSize > CharUnits::One())
286 Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
287
288 llvm::Constant *C = llvm::UndefValue::get(Ty);
289 Elements.push_back(C);
290 assert(getAlignment(C) == CharUnits::One() &&
291 "Padding must have 1 byte alignment!");
292
293 NextFieldOffsetInChars += getSizeInChars(C);
294 }
295
296 void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
297 assert(NextFieldOffsetInChars <= RecordSize &&
298 "Size mismatch!");
299
300 AppendPadding(RecordSize - NextFieldOffsetInChars);
301 }
302
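// Rebuild the element list so that every element sits at the same byte offset
// it had before, but with explicit undef padding instead of relying on
// alignment; afterwards the builder emits a packed (align-1) LLVM struct.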
303 void ConstStructBuilder::ConvertStructToPacked() {
304 std::vector<llvm::Constant *> PackedElements;
305 CharUnits ElementOffsetInChars = CharUnits::Zero();
306
307 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
308 llvm::Constant *C = Elements[i];
309
310 CharUnits ElementAlign = CharUnits::fromQuantity(
311 CGM.getTargetData().getABITypeAlignment(C->getType()));
312 CharUnits AlignedElementOffsetInChars =
313 ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
314
315 if (AlignedElementOffsetInChars > ElementOffsetInChars) {
316 // We need some padding.
317 CharUnits NumChars =
318 AlignedElementOffsetInChars - ElementOffsetInChars;
319
320 llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
321 if (NumChars > CharUnits::One())
322 Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
323
324 llvm::Constant *Padding = llvm::UndefValue::get(Ty);
325 PackedElements.push_back(Padding);
326 ElementOffsetInChars += getSizeInChars(Padding);
327 }
328
329 PackedElements.push_back(C);
330 ElementOffsetInChars += getSizeInChars(C);
331 }
332
333 assert(ElementOffsetInChars == NextFieldOffsetInChars &&
334 "Packing the struct changed its size!");
335
336 Elements = PackedElements;
337 LLVMStructAlignment = CharUnits::One();
338 Packed = true;
339 }
340
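// Walk the record's fields in declaration order, emitting an initializer (or
// a null constant) for each one and keeping the running offset in sync with
// the AST record layout; returns false if any initializer cannot be emitted
// as a constant.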
341 bool ConstStructBuilder::Build(InitListExpr *ILE) {
342 RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
343 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
344
345 unsigned FieldNo = 0;
346 unsigned ElementNo = 0;
347 const FieldDecl *LastFD = 0;
348 bool IsMsStruct = RD->hasAttr<MsStructAttr>();
349
350 for (RecordDecl::field_iterator Field = RD->field_begin(),
351 FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
352 if (IsMsStruct) {
353 // Zero-length bitfields following non-bitfield members are
354 // ignored:
355 if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
356 --FieldNo;
357 continue;
358 }
359 LastFD = (*Field);
360 }
361
362 // If this is a union, skip all the fields that aren't being initialized.
363 if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
364 continue;
365
366 // Don't emit anonymous bitfields, they just affect layout.
367 if (Field->isBitField() && !Field->getIdentifier()) {
368 LastFD = (*Field);
369 continue;
370 }
371
372 // Get the initializer. A struct can include fields without initializers;
373 // we just use explicit null values for them.
374 llvm::Constant *EltInit;
375 if (ElementNo < ILE->getNumInits())
376 EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
377 Field->getType(), CGF);
378 else
379 EltInit = CGM.EmitNullConstant(Field->getType());
380
381 if (!EltInit)
382 return false;
383
384 if (!Field->isBitField()) {
385 // Handle non-bitfield members.
386 if (!AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
387 return false;
388 } else {
389 // Otherwise we have a bitfield.
390 AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
391 cast<llvm::ConstantInt>(EltInit));
392 }
393 }
394
395 CharUnits LayoutSizeInChars = Layout.getSize();
396
397 if (NextFieldOffsetInChars > LayoutSizeInChars) {
398 // If the struct is bigger than the size of the record type,
399 // we must have a flexible array member at the end.
400 assert(RD->hasFlexibleArrayMember() &&
401 "Must have flexible array member if struct is bigger than type!");
402
403 // No tail padding is necessary.
404 return true;
405 }
406
407 CharUnits LLVMSizeInChars =
408 NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
409
410 // Check if we need to convert the struct to a packed struct.
411 if (NextFieldOffsetInChars <= LayoutSizeInChars &&
412 LLVMSizeInChars > LayoutSizeInChars) {
413 assert(!Packed && "Size mismatch!");
414
415 ConvertStructToPacked();
416 assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
417 "Converting to packed did not help!");
418 }
419
420 // Append tail padding if necessary.
421 AppendTailPadding(LayoutSizeInChars);
422
423 assert(LayoutSizeInChars == NextFieldOffsetInChars &&
424 "Tail padding mismatch!");
425
426 return true;
427 }
428
429 llvm::Constant *ConstStructBuilder::
430 BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) {
431 ConstStructBuilder Builder(CGM, CGF);
432
433 if (!Builder.Build(ILE))
434 return 0;
435
436 // Pick the type to use. If the type is layout identical to the ConvertType
437 // type then use it, otherwise use whatever the builder produced for us.
438 llvm::StructType *STy =
439 llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
440 Builder.Elements,Builder.Packed);
441 llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
442 if (llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
443 if (ILESTy->isLayoutIdentical(STy))
444 STy = ILESTy;
445 }
446
447 llvm::Constant *Result =
448 llvm::ConstantStruct::get(STy, Builder.Elements);
449
450 assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
451 Builder.getAlignment(Result)) ==
452 Builder.getSizeInChars(Result) && "Size mismatch!");
453
454 return Result;
455 }
456
457
458 //===----------------------------------------------------------------------===//
459 // ConstExprEmitter
460 //===----------------------------------------------------------------------===//
461
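// ConstExprEmitter handles expressions that the AST-level constant evaluator
// cannot fold on its own (compound literals, string literals, casts to union,
// init lists, trivial constructor calls) and translates them directly into
// LLVM constants, returning null when no constant can be produced.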
462 class ConstExprEmitter :
463 public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
464 CodeGenModule &CGM;
465 CodeGenFunction *CGF;
466 llvm::LLVMContext &VMContext;
467 public:
468 ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
469 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
470 }
471
472 //===--------------------------------------------------------------------===//
473 // Visitor Methods
474 //===--------------------------------------------------------------------===//
475
476 llvm::Constant *VisitStmt(Stmt *S) {
477 return 0;
478 }
479
480 llvm::Constant *VisitParenExpr(ParenExpr *PE) {
481 return Visit(PE->getSubExpr());
482 }
483
484 llvm::Constant *
485 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
486 return Visit(PE->getReplacement());
487 }
488
489 llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
490 return Visit(GE->getResultExpr());
491 }
492
493 llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
494 return Visit(E->getInitializer());
495 }
496
497 llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
498 if (E->getType()->isMemberPointerType())
499 return CGM.getMemberPointerConstant(E);
500
501 return 0;
502 }
503
504 llvm::Constant *VisitBinSub(BinaryOperator *E) {
505 // This must be a pointer/pointer subtraction. This only happens for
506 // address of label.
507 if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
508 !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
509 return 0;
510
511 llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
512 E->getLHS()->getType(), CGF);
513 llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
514 E->getRHS()->getType(), CGF);
515
516 llvm::Type *ResultType = ConvertType(E->getType());
517 LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
518 RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
519
520 // No need to divide by the element size: the address of a label has type
521 // void*, and GNU void* arithmetic treats the element size as 1.
522 return llvm::ConstantExpr::getSub(LHS, RHS);
523 }
524
525 llvm::Constant *VisitCastExpr(CastExpr* E) {
526 Expr *subExpr = E->getSubExpr();
527 llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
528 if (!C) return 0;
529
530 llvm::Type *destType = ConvertType(E->getType());
531
532 switch (E->getCastKind()) {
533 case CK_ToUnion: {
534 // GCC cast to union extension
535 assert(E->getType()->isUnionType() &&
536 "Destination type is not union type!");
537
538 // Build a struct with the union sub-element as the first member,
539 // and padded to the appropriate size
540 std::vector<llvm::Constant*> Elts;
541 std::vector<llvm::Type*> Types;
542 Elts.push_back(C);
543 Types.push_back(C->getType());
544 unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
545 unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
546
547 assert(CurSize <= TotalSize && "Union size mismatch!");
548 if (unsigned NumPadBytes = TotalSize - CurSize) {
549 llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
550 if (NumPadBytes > 1)
551 Ty = llvm::ArrayType::get(Ty, NumPadBytes);
552
553 Elts.push_back(llvm::UndefValue::get(Ty));
554 Types.push_back(Ty);
555 }
556
557 llvm::StructType* STy =
558 llvm::StructType::get(C->getType()->getContext(), Types, false);
559 return llvm::ConstantStruct::get(STy, Elts);
560 }
561 case CK_NullToMemberPointer: {
562 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
563 return CGM.getCXXABI().EmitNullMemberPointer(MPT);
564 }
565
566 case CK_DerivedToBaseMemberPointer:
567 case CK_BaseToDerivedMemberPointer:
568 return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
569
570 case CK_LValueToRValue:
571 case CK_NoOp:
572 return C;
573
574 case CK_AnyPointerToObjCPointerCast:
575 case CK_AnyPointerToBlockPointerCast:
576 case CK_LValueBitCast:
577 case CK_BitCast:
578 if (C->getType() == destType) return C;
579 return llvm::ConstantExpr::getBitCast(C, destType);
580
581 case CK_Dependent: llvm_unreachable("saw dependent cast!");
582
583 // These will never be supported.
584 case CK_ObjCObjectLValueCast:
585 case CK_GetObjCProperty:
586 case CK_ToVoid:
587 case CK_Dynamic:
588 case CK_ObjCProduceObject:
589 case CK_ObjCConsumeObject:
590 case CK_ObjCReclaimReturnedObject:
591 return 0;
592
593 // These might need to be supported for constexpr.
594 case CK_UserDefinedConversion:
595 case CK_ConstructorConversion:
596 return 0;
597
598 // These should eventually be supported.
599 case CK_ArrayToPointerDecay:
600 case CK_FunctionToPointerDecay:
601 case CK_BaseToDerived:
602 case CK_DerivedToBase:
603 case CK_UncheckedDerivedToBase:
604 case CK_MemberPointerToBoolean:
605 case CK_VectorSplat:
606 case CK_FloatingRealToComplex:
607 case CK_FloatingComplexToReal:
608 case CK_FloatingComplexToBoolean:
609 case CK_FloatingComplexCast:
610 case CK_FloatingComplexToIntegralComplex:
611 case CK_IntegralRealToComplex:
612 case CK_IntegralComplexToReal:
613 case CK_IntegralComplexToBoolean:
614 case CK_IntegralComplexCast:
615 case CK_IntegralComplexToFloatingComplex:
616 return 0;
617
618 case CK_PointerToIntegral:
619 if (!E->getType()->isBooleanType())
620 return llvm::ConstantExpr::getPtrToInt(C, destType);
621 // fallthrough
622
623 case CK_PointerToBoolean:
624 return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
625 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(C->getType())));
626
627 case CK_NullToPointer:
628 return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destType));
629
630 case CK_IntegralCast: {
631 bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
632 return llvm::ConstantExpr::getIntegerCast(C, destType, isSigned);
633 }
634
635 case CK_IntegralToPointer: {
636 bool isSigned = subExpr->getType()->isSignedIntegerOrEnumerationType();
637 C = llvm::ConstantExpr::getIntegerCast(C, CGM.IntPtrTy, isSigned);
638 return llvm::ConstantExpr::getIntToPtr(C, destType);
639 }
640
641 case CK_IntegralToBoolean:
642 return llvm::ConstantExpr::getICmp(llvm::CmpInst::ICMP_EQ, C,
643 llvm::Constant::getNullValue(C->getType()));
644
645 case CK_IntegralToFloating:
646 if (subExpr->getType()->isSignedIntegerOrEnumerationType())
647 return llvm::ConstantExpr::getSIToFP(C, destType);
648 else
649 return llvm::ConstantExpr::getUIToFP(C, destType);
650
651 case CK_FloatingToIntegral:
652 if (E->getType()->isSignedIntegerOrEnumerationType())
653 return llvm::ConstantExpr::getFPToSI(C, destType);
654 else
655 return llvm::ConstantExpr::getFPToUI(C, destType);
656
657 case CK_FloatingToBoolean:
658 return llvm::ConstantExpr::getFCmp(llvm::CmpInst::FCMP_UNE, C,
659 llvm::Constant::getNullValue(C->getType()));
660
661 case CK_FloatingCast:
662 return llvm::ConstantExpr::getFPCast(C, destType);
663 }
664 llvm_unreachable("Invalid CastKind");
665 }
666
667 llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
668 return Visit(DAE->getExpr());
669 }
670
671 llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
672 return Visit(E->GetTemporaryExpr());
673 }
674
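// Arrays: emit each explicit initializer, then replicate the array filler (or
// a null constant) for the remaining elements. If the element constants end
// up with differing types, fall back to a packed struct with the same layout.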
675 llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
676 unsigned NumInitElements = ILE->getNumInits();
677 if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
678 (isa<StringLiteral>(ILE->getInit(0)) ||
679 isa<ObjCEncodeExpr>(ILE->getInit(0))))
680 return Visit(ILE->getInit(0));
681
682 std::vector<llvm::Constant*> Elts;
683 llvm::ArrayType *AType =
684 cast<llvm::ArrayType>(ConvertType(ILE->getType()));
685 llvm::Type *ElemTy = AType->getElementType();
686 unsigned NumElements = AType->getNumElements();
687
688 // Initialising an array requires us to automatically
689 // initialise any elements that have not been initialised explicitly
690 unsigned NumInitableElts = std::min(NumInitElements, NumElements);
691
692 // Copy initializer elements.
693 unsigned i = 0;
694 bool RewriteType = false;
695 for (; i < NumInitableElts; ++i) {
696 Expr *Init = ILE->getInit(i);
697 llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
698 if (!C)
699 return 0;
700 RewriteType |= (C->getType() != ElemTy);
701 Elts.push_back(C);
702 }
703
704 // Initialize remaining array elements.
705 // FIXME: This doesn't handle member pointers correctly!
706 llvm::Constant *fillC;
707 if (Expr *filler = ILE->getArrayFiller())
708 fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
709 else
710 fillC = llvm::Constant::getNullValue(ElemTy);
711 if (!fillC)
712 return 0;
713 RewriteType |= (fillC->getType() != ElemTy);
714 for (; i < NumElements; ++i)
715 Elts.push_back(fillC);
716
717 if (RewriteType) {
718 // FIXME: Try to avoid packing the array
719 std::vector<llvm::Type*> Types;
720 for (unsigned i = 0; i < Elts.size(); ++i)
721 Types.push_back(Elts[i]->getType());
722 llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
723 Types, true);
724 return llvm::ConstantStruct::get(SType, Elts);
725 }
726
727 return llvm::ConstantArray::get(AType, Elts);
728 }
729
730 llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
731 return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
732 }
733
734 llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
735 return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
736 }
737
738 llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
739 return CGM.EmitNullConstant(E->getType());
740 }
741
742 llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
743 if (ILE->getType()->isScalarType()) {
744 // We have a scalar in braces. Just use the first element.
745 if (ILE->getNumInits() > 0) {
746 Expr *Init = ILE->getInit(0);
747 return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
748 }
749 return CGM.EmitNullConstant(ILE->getType());
750 }
751
752 if (ILE->getType()->isArrayType())
753 return EmitArrayInitialization(ILE);
754
755 if (ILE->getType()->isRecordType())
756 return EmitStructInitialization(ILE);
757
758 if (ILE->getType()->isUnionType())
759 return EmitUnionInitialization(ILE);
760
761 // If ILE was a constant vector, we would have handled it already.
762 if (ILE->getType()->isVectorType())
763 return 0;
764
765 assert(0 && "Unable to handle InitListExpr");
766 // Silence the 'control reaches end of non-void function' warning.
767 // Not reached.
768 return 0;
769 }
770
771 llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
772 if (!E->getConstructor()->isTrivial())
773 return 0;
774
775 QualType Ty = E->getType();
776
777 // FIXME: We should not have to call getBaseElementType here.
778 const RecordType *RT =
779 CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
780 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
781
782 // If the class doesn't have a trivial destructor, we can't emit it as a
783 // constant expr.
784 if (!RD->hasTrivialDestructor())
785 return 0;
786
787 // Only copy and default constructors can be trivial.
788
789
790 if (E->getNumArgs()) {
791 assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
792 assert(E->getConstructor()->isCopyConstructor() &&
793 "trivial ctor has argument but isn't a copy ctor");
794
795 Expr *Arg = E->getArg(0);
796 assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
797 "argument to copy ctor is of wrong type");
798
799 return Visit(Arg);
800 }
801
802 return CGM.EmitNullConstant(Ty);
803 }
804
805 llvm::Constant *VisitStringLiteral(StringLiteral *E) {
806 assert(!E->getType()->isPointerType() && "Strings are always arrays");
807
808 // This must be a string initializing an array in a static initializer.
809 // Don't emit it as the address of the string, emit the string data itself
810 // as an inline array.
811 return llvm::ConstantArray::get(VMContext,
812 CGM.GetStringForStringLiteral(E), false);
813 }
814
815 llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
816 // This must be an @encode initializing an array in a static initializer.
817 // Don't emit it as the address of the string, emit the string data itself
818 // as an inline array.
819 std::string Str;
820 CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
821 const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
822
823 // Resize the string to the right size, adding zeros at the end, or
824 // truncating as needed.
825 Str.resize(CAT->getSize().getZExtValue(), '\0');
826 return llvm::ConstantArray::get(VMContext, Str, false);
827 }
828
829 llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
830 return Visit(E->getSubExpr());
831 }
832
833 // Utility methods
834 llvm::Type *ConvertType(QualType T) {
835 return CGM.getTypes().ConvertType(T);
836 }
837
838 public:
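// Emit the address of an expression that can appear as a constant lvalue:
// compound literals, globals and functions, string and @encode literals,
// ObjC string literals, predefined identifiers, label addresses, the
// CFString/NSString builtins, and global blocks.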
839 llvm::Constant *EmitLValue(Expr *E) {
840 switch (E->getStmtClass()) {
841 default: break;
842 case Expr::CompoundLiteralExprClass: {
843 // Note that due to the nature of compound literals, this is guaranteed
844 // to be the only use of the variable, so we just generate it here.
845 CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
846 llvm::Constant* C = Visit(CLE->getInitializer());
847 // FIXME: "Leaked" on failure.
848 if (C)
849 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
850 E->getType().isConstant(CGM.getContext()),
851 llvm::GlobalValue::InternalLinkage,
852 C, ".compoundliteral", 0, false,
853 CGM.getContext().getTargetAddressSpace(E->getType()));
854 return C;
855 }
856 case Expr::DeclRefExprClass: {
857 ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
858 if (Decl->hasAttr<WeakRefAttr>())
859 return CGM.GetWeakRefReference(Decl);
860 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
861 return CGM.GetAddrOfFunction(FD);
862 if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
863 // We can never refer to a variable with local storage.
864 if (!VD->hasLocalStorage()) {
865 if (VD->isFileVarDecl() || VD->hasExternalStorage())
866 return CGM.GetAddrOfGlobalVar(VD);
867 else if (VD->isLocalVarDecl()) {
868 assert(CGF && "Can't access static local vars without CGF");
869 return CGF->GetAddrOfStaticLocalVar(VD);
870 }
871 }
872 }
873 break;
874 }
875 case Expr::StringLiteralClass:
876 return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
877 case Expr::ObjCEncodeExprClass:
878 return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
879 case Expr::ObjCStringLiteralClass: {
880 ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
881 llvm::Constant *C =
882 CGM.getObjCRuntime().GenerateConstantString(SL->getString());
883 return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
884 }
885 case Expr::PredefinedExprClass: {
886 unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
887 if (CGF) {
888 LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
889 return cast<llvm::Constant>(Res.getAddress());
890 } else if (Type == PredefinedExpr::PrettyFunction) {
891 return CGM.GetAddrOfConstantCString("top level", ".tmp");
892 }
893
894 return CGM.GetAddrOfConstantCString("", ".tmp");
895 }
896 case Expr::AddrLabelExprClass: {
897 assert(CGF && "Invalid address of label expression outside function.");
898 llvm::Constant *Ptr =
899 CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
900 return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
901 }
902 case Expr::CallExprClass: {
903 CallExpr* CE = cast<CallExpr>(E);
904 unsigned builtin = CE->isBuiltinCall(CGM.getContext());
905 if (builtin !=
906 Builtin::BI__builtin___CFStringMakeConstantString &&
907 builtin !=
908 Builtin::BI__builtin___NSStringMakeConstantString)
909 break;
910 const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
911 const StringLiteral *Literal = cast<StringLiteral>(Arg);
912 if (builtin ==
913 Builtin::BI__builtin___NSStringMakeConstantString) {
914 return CGM.getObjCRuntime().GenerateConstantString(Literal);
915 }
916 // FIXME: need to deal with UCN conversion issues.
917 return CGM.GetAddrOfConstantCFString(Literal);
918 }
919 case Expr::BlockExprClass: {
920 std::string FunctionName;
921 if (CGF)
922 FunctionName = CGF->CurFn->getName();
923 else
924 FunctionName = "global";
925
926 return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
927 }
928 }
929
930 return 0;
931 }
932 };
933
934 } // end anonymous namespace.
935
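// Try the AST-level constant evaluator first; if it produces a usable APValue
// with no side effects, lower that value directly. Otherwise fall back to the
// syntactic ConstExprEmitter above.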
936 llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
937 QualType DestType,
938 CodeGenFunction *CGF) {
939 Expr::EvalResult Result;
940
941 bool Success = false;
942
943 if (DestType->isReferenceType())
944 Success = E->EvaluateAsLValue(Result, Context);
945 else
946 Success = E->Evaluate(Result, Context);
947
948 if (Success && !Result.HasSideEffects) {
949 switch (Result.Val.getKind()) {
950 case APValue::Uninitialized:
951 assert(0 && "Constant expressions should be initialized.");
952 return 0;
953 case APValue::LValue: {
954 llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
955 llvm::Constant *Offset =
956 llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
957 Result.Val.getLValueOffset().getQuantity());
958
959 llvm::Constant *C;
960 if (const Expr *LVBase = Result.Val.getLValueBase()) {
961 C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
962
963 // Apply offset if necessary.
964 if (!Offset->isNullValue()) {
965 llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
966 llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
967 Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
968 C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
969 }
970
971 // Convert to the appropriate type; this could be an lvalue for
972 // an integer.
973 if (isa<llvm::PointerType>(DestTy))
974 return llvm::ConstantExpr::getBitCast(C, DestTy);
975
976 return llvm::ConstantExpr::getPtrToInt(C, DestTy);
977 } else {
978 C = Offset;
979
980 // Convert to the appropriate type; this could be an lvalue for
981 // an integer.
982 if (isa<llvm::PointerType>(DestTy))
983 return llvm::ConstantExpr::getIntToPtr(C, DestTy);
984
985 // If the types don't match this should only be a truncate.
986 if (C->getType() != DestTy)
987 return llvm::ConstantExpr::getTrunc(C, DestTy);
988
989 return C;
990 }
991 }
992 case APValue::Int: {
993 llvm::Constant *C = llvm::ConstantInt::get(VMContext,
994 Result.Val.getInt());
995
996 if (C->getType()->isIntegerTy(1)) {
997 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
998 C = llvm::ConstantExpr::getZExt(C, BoolTy);
999 }
1000 return C;
1001 }
1002 case APValue::ComplexInt: {
1003 llvm::Constant *Complex[2];
1004
1005 Complex[0] = llvm::ConstantInt::get(VMContext,
1006 Result.Val.getComplexIntReal());
1007 Complex[1] = llvm::ConstantInt::get(VMContext,
1008 Result.Val.getComplexIntImag());
1009
1010 // FIXME: the target may want to specify that this is packed.
1011 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1012 Complex[1]->getType(),
1013 NULL);
1014 return llvm::ConstantStruct::get(STy, Complex);
1015 }
1016 case APValue::Float:
1017 return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
1018 case APValue::ComplexFloat: {
1019 llvm::Constant *Complex[2];
1020
1021 Complex[0] = llvm::ConstantFP::get(VMContext,
1022 Result.Val.getComplexFloatReal());
1023 Complex[1] = llvm::ConstantFP::get(VMContext,
1024 Result.Val.getComplexFloatImag());
1025
1026 // FIXME: the target may want to specify that this is packed.
1027 llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1028 Complex[1]->getType(),
1029 NULL);
1030 return llvm::ConstantStruct::get(STy, Complex);
1031 }
1032 case APValue::Vector: {
1033 llvm::SmallVector<llvm::Constant *, 4> Inits;
1034 unsigned NumElts = Result.Val.getVectorLength();
1035
1036 if (Context.getLangOptions().AltiVec &&
1037 isa<CastExpr>(E) &&
1038 cast<CastExpr>(E)->getCastKind() == CK_VectorSplat) {
1039 // AltiVec vector initialization with a single literal
1040 APValue &Elt = Result.Val.getVectorElt(0);
1041
1042 llvm::Constant* InitValue = Elt.isInt()
1043 ? cast<llvm::Constant>
1044 (llvm::ConstantInt::get(VMContext, Elt.getInt()))
1045 : cast<llvm::Constant>
1046 (llvm::ConstantFP::get(VMContext, Elt.getFloat()));
1047
1048 for (unsigned i = 0; i != NumElts; ++i)
1049 Inits.push_back(InitValue);
1050
1051 } else {
1052 for (unsigned i = 0; i != NumElts; ++i) {
1053 APValue &Elt = Result.Val.getVectorElt(i);
1054 if (Elt.isInt())
1055 Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
1056 else
1057 Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
1058 }
1059 }
1060 return llvm::ConstantVector::get(Inits);
1061 }
1062 }
1063 }
1064
1065 llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
1066 if (C && C->getType()->isIntegerTy(1)) {
1067 llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1068 C = llvm::ConstantExpr::getZExt(C, BoolTy);
1069 }
1070 return C;
1071 }
1072
1073 static uint64_t getFieldOffset(ASTContext &C, const FieldDecl *field) {
1074 const ASTRecordLayout &layout = C.getASTRecordLayout(field->getParent());
1075 return layout.getFieldOffset(field->getFieldIndex());
1076 }
1077
1078 llvm::Constant *
1079 CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
1080 // Member pointer constants always have a very particular form.
1081 const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
1082 const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
1083
1084 // A member function pointer.
1085 if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
1086 return getCXXABI().EmitMemberPointer(method);
1087
1088 // Otherwise, a member data pointer.
1089 uint64_t fieldOffset;
1090 if (const FieldDecl *field = dyn_cast<FieldDecl>(decl))
1091 fieldOffset = getFieldOffset(getContext(), field);
1092 else {
1093 const IndirectFieldDecl *ifield = cast<IndirectFieldDecl>(decl);
1094
1095 fieldOffset = 0;
1096 for (IndirectFieldDecl::chain_iterator ci = ifield->chain_begin(),
1097 ce = ifield->chain_end(); ci != ce; ++ci)
1098 fieldOffset += getFieldOffset(getContext(), cast<FieldDecl>(*ci));
1099 }
1100
1101 CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
1102 return getCXXABI().EmitMemberDataPointer(type, chars);
1103 }
1104
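// Walk T recursively and overwrite the bytes of Elements that correspond to
// pointers to data members with -1, which is their null representation in the
// Itanium C++ ABI; everything else is left untouched.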
1105 static void
1106 FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
1107 std::vector<llvm::Constant *> &Elements,
1108 uint64_t StartOffset) {
1109 assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
1110 "StartOffset not byte aligned!");
1111
1112 if (CGM.getTypes().isZeroInitializable(T))
1113 return;
1114
1115 if (const ConstantArrayType *CAT =
1116 CGM.getContext().getAsConstantArrayType(T)) {
1117 QualType ElementTy = CAT->getElementType();
1118 uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
1119
1120 for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
1121 FillInNullDataMemberPointers(CGM, ElementTy, Elements,
1122 StartOffset + I * ElementSize);
1123 }
1124 } else if (const RecordType *RT = T->getAs<RecordType>()) {
1125 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1126 const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
1127
1128 // Go through all bases and fill in any null pointer to data members.
1129 for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
1130 E = RD->bases_end(); I != E; ++I) {
1131 if (I->isVirtual()) {
1132 // Ignore virtual bases.
1133 continue;
1134 }
1135
1136 const CXXRecordDecl *BaseDecl =
1137 cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
1138
1139 // Ignore empty bases.
1140 if (BaseDecl->isEmpty())
1141 continue;
1142
1143 // Ignore bases that don't have any pointer to data members.
1144 if (CGM.getTypes().isZeroInitializable(BaseDecl))
1145 continue;
1146
1147 uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
1148 FillInNullDataMemberPointers(CGM, I->getType(),
1149 Elements, StartOffset + BaseOffset);
1150 }
1151
1152 // Visit all fields.
1153 unsigned FieldNo = 0;
1154 for (RecordDecl::field_iterator I = RD->field_begin(),
1155 E = RD->field_end(); I != E; ++I, ++FieldNo) {
1156 QualType FieldType = I->getType();
1157
1158 if (CGM.getTypes().isZeroInitializable(FieldType))
1159 continue;
1160
1161 uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
1162 FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
1163 }
1164 } else {
1165 assert(T->isMemberPointerType() && "Should only see member pointers here!");
1166 assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1167 "Should only see pointers to data members here!");
1168
1169 CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
1170 CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
1171
1172 // FIXME: hardcodes Itanium member pointer representation!
1173 llvm::Constant *NegativeOne =
1174 llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
1175 -1ULL, /*isSigned*/true);
1176
1177 // Fill in the null data member pointer.
1178 for (CharUnits I = StartIndex; I != EndIndex; ++I)
1179 Elements[I.getQuantity()] = NegativeOne;
1180 }
1181 }
1182
1183 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1184 llvm::Type *baseType,
1185 const CXXRecordDecl *base);
1186
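// Build the null constant for a C++ record, laid out either as a complete
// object or as a base subobject. Bases and fields that contain pointers to
// data members get their own non-trivial null constants; everything else is
// zero-filled.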
1187 static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
1188 const CXXRecordDecl *record,
1189 bool asCompleteObject) {
1190 const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
1191 llvm::StructType *structure =
1192 (asCompleteObject ? layout.getLLVMType()
1193 : layout.getBaseSubobjectLLVMType());
1194
1195 unsigned numElements = structure->getNumElements();
1196 std::vector<llvm::Constant *> elements(numElements);
1197
1198 // Fill in all the bases.
1199 for (CXXRecordDecl::base_class_const_iterator
1200 I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
1201 if (I->isVirtual()) {
1202 // Ignore virtual bases; if we're laying out for a complete
1203 // object, we'll lay these out later.
1204 continue;
1205 }
1206
1207 const CXXRecordDecl *base =
1208 cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1209
1210 // Ignore empty bases.
1211 if (base->isEmpty())
1212 continue;
1213
1214 unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
1215 llvm::Type *baseType = structure->getElementType(fieldIndex);
1216 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1217 }
1218
1219 // Fill in all the fields.
1220 for (RecordDecl::field_iterator I = record->field_begin(),
1221 E = record->field_end(); I != E; ++I) {
1222 const FieldDecl *field = *I;
1223
1224 // Ignore bit fields.
1225 if (field->isBitField())
1226 continue;
1227
1228 unsigned fieldIndex = layout.getLLVMFieldNo(field);
1229 elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
1230 }
1231
1232 // Fill in the virtual bases, if we're working with the complete object.
1233 if (asCompleteObject) {
1234 for (CXXRecordDecl::base_class_const_iterator
1235 I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
1236 const CXXRecordDecl *base =
1237 cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1238
1239 // Ignore empty bases.
1240 if (base->isEmpty())
1241 continue;
1242
1243 unsigned fieldIndex = layout.getVirtualBaseIndex(base);
1244
1245 // We might have already laid this field out.
1246 if (elements[fieldIndex]) continue;
1247
1248 llvm::Type *baseType = structure->getElementType(fieldIndex);
1249 elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1250 }
1251 }
1252
1253 // Now go through all other fields and zero them out.
1254 for (unsigned i = 0; i != numElements; ++i) {
1255 if (!elements[i])
1256 elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
1257 }
1258
1259 return llvm::ConstantStruct::get(structure, elements);
1260 }
1261
1262 /// Emit the null constant for a base subobject.
1263 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1264 llvm::Type *baseType,
1265 const CXXRecordDecl *base) {
1266 const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
1267
1268 // Just zero out bases that don't have any pointer to data members.
1269 if (baseLayout.isZeroInitializableAsBase())
1270 return llvm::Constant::getNullValue(baseType);
1271
1272 // If the base type is a struct, we can just use its null constant.
1273 if (isa<llvm::StructType>(baseType)) {
1274 return EmitNullConstant(CGM, base, /*complete*/ false);
1275 }
1276
1277 // Otherwise, some bases are represented as arrays of i8 if the size
1278 // of the base is smaller than its corresponding LLVM type. Figure
1279 // out how many elements this base array has.
1280 llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
1281 unsigned numBaseElements = baseArrayType->getNumElements();
1282
1283 // Fill in null data member pointers.
1284 std::vector<llvm::Constant *> baseElements(numBaseElements);
1285 FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
1286 baseElements, 0);
1287
1288 // Now go through all other elements and zero them out.
1289 if (numBaseElements) {
1290 llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
1291 llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
1292 for (unsigned i = 0; i != numBaseElements; ++i) {
1293 if (!baseElements[i])
1294 baseElements[i] = i8_zero;
1295 }
1296 }
1297
1298 return llvm::ConstantArray::get(baseArrayType, baseElements);
1299 }
1300
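// Public entry point. Types with an all-zero null representation get a plain
// zeroinitializer; constant arrays and records that contain pointers to data
// members are built element by element so those members get the ABI's
// non-zero null representation.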
1301 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
1302 if (getTypes().isZeroInitializable(T))
1303 return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
1304
1305 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
1306
1307 QualType ElementTy = CAT->getElementType();
1308
1309 llvm::Constant *Element = EmitNullConstant(ElementTy);
1310 unsigned NumElements = CAT->getSize().getZExtValue();
1311 std::vector<llvm::Constant *> Array(NumElements);
1312 for (unsigned i = 0; i != NumElements; ++i)
1313 Array[i] = Element;
1314
1315 llvm::ArrayType *ATy =
1316 cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
1317 return llvm::ConstantArray::get(ATy, Array);
1318 }
1319
1320 if (const RecordType *RT = T->getAs<RecordType>()) {
1321 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1322 return ::EmitNullConstant(*this, RD, /*complete object*/ true);
1323 }
1324
1325 assert(T->isMemberPointerType() && "Should only see member pointers here!");
1326 assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1327 "Should only see pointers to data members here!");
1328
1329 // Itanium C++ ABI 2.3:
1330 // A NULL pointer is represented as -1.
1331 return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
1332 }
1333