//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  ReturnValueSlot getReturnValueSlot() const {
    // If the destination slot requires garbage collection, we can't
    // use the real return value slot, because we have to use the GC
    // API.
    if (Dest.requiresGCollection()) return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitGCMove(const Expr *E, RValue Src);

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
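
// For example, under Objective-C garbage collection a C struct such as
//   struct S { id obj; int x; };
// has an object member, so copying one must go through the collector-aware
// runtime entry point (objc_memmove_collectable on the Mac runtime) rather
// than a plain memcpy.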

/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitGCMove(E, Result);
/// If GC doesn't interfere, this will cause the result to be emitted
/// directly into the return value slot.  If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand is
  // volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile() | Src.isVolatileQualified());
}
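
// For example, given a 'volatile struct S vs;', the C expression statement
//   vs;
// still requires an lvalue-to-rvalue conversion, so the branch above
// materializes an "agg.tmp" temporary solely to perform the volatile read;
// in C++ the statement emits nothing at all.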

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our
    // handling of assignments, where we store directly into the LHS without
    // checking whether anything in the RHS aliases.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}
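
// The POD path above matters for self-referential compound literals, e.g.
//   struct Pt p = {1, 2};
//   p = (struct Pt){p.y, p.x};
// Emitting the literal's initializer directly into 'p' could clobber p.y
// before it is read, so we evaluate the literal as an lvalue and then copy.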


void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }
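
  // The GCC cast-to-union extension looks like:
  //   union U { int i; float f; };
  //   union U u = (union U)42;   // initializes u.i
  // We emit the operand at its own type through a pointer cast of the
  // destination; any remaining bytes of the union are left untouched.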

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitGCMove(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ObjCProduceObject:
  case CK_ObjCConsumeObject:
  case CK_ObjCReclaimReturnedObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitGCMove(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When a __block variable is on the LHS, the RHS must be evaluated
        // first, as it may change the 'forwarding' field via a call to
        // Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        bool GCollection = false;
        if (CGF.getContext().getLangOptions().getGCMode())
          GCollection = TypeRequiresGCollection(E->getLHS()->getType());
        Dest = AggValueSlot::forLValue(LHS, true, GCollection);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
                                                   GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}
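
// For example, in 'a = f()' where f returns a struct, the call is emitted
// with 'a' itself as the return slot, so no extra temporary or copy is
// needed (in the non-GC case); the trailing EmitFinalDestCopy only does
// extra work when the assignment's own result is used, as in 'b = (a = f())'.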

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that its lifetime is externally managed.
  Dest.setLifetimeExternallyManaged(DestLifetimeManaged);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}
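
// For example, 'x = b ? f() : g()' (with f and g returning a struct)
// evaluates exactly one arm, and both arms target the same destination
// slot, so the result is constructed in place with no extra copy.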

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether its lifetime was externally managed.
  bool WasManaged = Dest.isLifetimeExternallyManaged();
  Dest = EnsureSlot(E->getType());
  Dest.setLifetimeExternallyManaged();

  Visit(E->getSubExpr());

  // Set up the temporary's destructor if its lifetime wasn't already
  // being managed.
  if (!WasManaged)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}
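
// Concretely, when the slot was not pre-zeroed by CheckAggExprForMemSetUse,
// an init list such as
//   struct S { int x, a[8], b[8], c[8]; } s = {7, {1,2,3,4,5,6,7,8}};
// reaches this path once per defaulted member, zeroing 'b' and 'c' with
// two separate memsets instead of one combined one.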

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
      new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, indices+2,
                                "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one,
                                            "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union.  The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                                fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1]);
}
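
// For example,
//   struct S { int a, b, c; } s = {1, 2};
// is emitted as a store of 1 to s.a, a store of 2 to s.b, and a null
// store to s.c; the early exit above skips s.c entirely when the
// destination is already known to be zeroed.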

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E,
                                          CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a
        // pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().Target.getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
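
// For example, with 4-byte ints, 'int a[100] = {1, 2};' counts roughly
// 8 non-zero bytes out of 400, which lets the caller below conclude
// that a memset plus two explicit stores beats 100 individual stores.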

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
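
// For example, with 4-byte ints, 'struct Big { int data[64]; } b = {{1}};'
// is 256 bytes with only 4 of them non-zero, so this emits a single
// 256-byte memset and marks the slot zeroed; the init-list emitter then
// stores just the leading 1 and skips the remaining zero elements.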
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot, which may be ignored to
/// indicate that the value of the aggregate expression is not needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy,
                                                    size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       TypeInfo.second.getQuantity(), isVolatile);
}
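
// As a rough sketch of the non-GC output, 'a = b;' for a plain 16-byte
// struct lowers to something like
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 16,
//                                        i32 4, i1 false)
// where the final i1 operand becomes true when either side is volatile.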