//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/MDBuilder.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
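
// Illustration (a sketch, not from the original source): the two helpers
// above differ in which representation of the type they allocate.
// CreateIRTemp uses the expression ("register") type, CreateMemTemp the
// in-memory type; the classic case where they differ is bool, which is i1 as
// an IR value but i8 in memory. Assuming a CodeGenFunction CGF and
// ASTContext Ctx in scope:
//
//   llvm::AllocaInst *A = CGF.CreateIRTemp(Ctx.BoolTy, "a");  // alloca i1
//   llvm::AllocaInst *B = CGF.CreateMemTemp(Ctx.BoolTy, "b"); // alloca i8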

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
struct SubobjectAdjustment {
  enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

  union {
    struct {
      const CastExpr *BasePath;
      const CXXRecordDecl *DerivedClass;
    } DerivedToBase;

    FieldDecl *Field;
  };

  SubobjectAdjustment(const CastExpr *BasePath,
                      const CXXRecordDecl *DerivedClass)
    : Kind(DerivedToBaseAdjustment) {
    DerivedToBase.BasePath = BasePath;
    DerivedToBase.DerivedClass = DerivedClass;
  }

  SubobjectAdjustment(FieldDecl *Field)
    : Kind(FieldAdjustment) {
    this->Field = Field;
  }
};
}
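
// Illustration (not from the original source): both kinds of adjustment can
// arise when binding a reference to a subobject of a temporary, e.g.
//
//   struct Base { int x; };
//   struct Derived : Base {};
//   Derived make();
//   const Base &b = make();    // DerivedToBaseAdjustment
//   const int &i = make().x;   // FieldAdjustment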

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
            CGF.GetAddressOfBaseClass(Object,
                                      Adjustment.DerivedToBase.DerivedClass,
                                      Adjustment.DerivedToBase.BasePath->path_begin(),
                                      Adjustment.DerivedToBase.BasePath->path_end(),
                                      /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}
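
// For example (illustrative, not from the original source): in
//
//   struct S { ~S(); };
//   S makeS();
//   void f() { const S &s = makeS(); }
//
// the temporary bound to 's' has its lifetime extended to the end of f(), so
// the code above pushes a destructor cleanup that runs ~S() at scope exit
// rather than at the end of the full-expression.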


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}
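
// For instance (a sketch): for an ext_vector swizzle like 'V.zy', the encoded
// constant Elts is roughly the index vector <2, 1>, so
// getAccessedFieldNo(0, Elts) yields 2 and getAccessedFieldNo(1, Elts)
// yields 1.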

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}
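
// A sketch of the IR this produces for a 4-byte access through %p (assuming
// a 64-bit target; the exact block layout may differ):
//
//   %size = call i64 @llvm.objectsize.i64(i8* %p, i1 false)
//   %unknown = icmp eq i64 %size, -1   ; -1 means "size unknown": assume OK
//   br i1 %unknown, label %cont, label %check
// check:
//   %ok = icmp uge i64 %size, 4        ; trap if the object is too small
//   br i1 %ok, label %cont, label %trap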


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
           "Only single-element init list can be lvalue.");
    return EmitLValue(cast<InitListExpr>(E)->getInit(0));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
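
// For example (an illustrative case, not from the original source): a local
// 'const float pi = 3.14f;' has CEK_AsValueOnly, so a use of 'pi' inside a
// lambda that never captured it can still be folded to the constant value
// even though 'pi' is not constexpr.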

/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
                                 CGM.getCodeGenOpts().StrictEnums &&
                                 !ET->getDecl()->isFixed());
  bool IsBool = hasBooleanRepresentation(Ty);
  llvm::Type *LTy;
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return NULL;

  llvm::APInt Min;
  llvm::APInt End;
  if (IsBool) {
    Min = llvm::APInt(8, 0);
    End = llvm::APInt(8, 2);
    LTy = Int8Ty;
  } else {
    const EnumDecl *ED = ET->getDecl();
    LTy = ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
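
// As a sketch of the effect: a load of a bool gets !range metadata limiting
// it to [0, 2), roughly
//
//   %v = load i8* %p, align 1, !range !0
//   !0 = metadata !{i8 0, i8 2}
//
// and with -fstrict-enums a load of 'enum E { A, B, C }' would get the range
// [0, 4), computed from the enum's number of positive bits (here 2).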

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic.
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}
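
// In IR terms these two helpers form a matched pair for bool (a sketch):
//
//   EmitToMemory:   %frombool = zext i1 %v to i8   ; before a store
//   EmitFromMemory: %tobool   = trunc i8 %m to i1  ; after a load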

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
                      CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}
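
// An illustrative sketch (not from the original source): loading 'b' from
// 'struct { unsigned a : 3, b : 4; } x;' with a single i8 access component
// comes out roughly as
//
//   %val = load i8* %ptr
//   %shr = lshr i8 %val, 3       ; shift out the low bits of 'a'
//   %clr = and i8 %shr, 15       ; mask to the 4 bits of 'b' ("bf.clear")
//   %res = zext i8 %clr to i32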

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}
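
// E.g. for a float4 'V', the swizzle 'V.zy' loads the vector and shuffles it
// (roughly):
//
//   %vec = load <4 x float>* %V
//   %res = shufflevector <4 x float> %vec, <4 x float> undef,
//                        <2 x i32> <i32 2, i32 1>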


/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (hasBooleanRepresentation(Dst.getType()))
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}
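
// A sketch of the read/modify/write this produces for 'x.b = v' with the
// layout from the earlier bit-field example ('b' occupying bits [3,7) of an
// i8 access unit); the exact instruction order may differ:
//
//   %src  = and i32 %v, 15         ; truncate to the field width ("bf.value")
//   %val  = trunc i32 %src to i8
//   %shl  = shl i8 %val, 3         ; shift into position in memory
//   %old  = load i8* %ptr
//   %mask = and i8 %old, -121      ; clear bits 3..6 (InvMask = ~0b01111000)
//   %new  = or i8 %mask, %shl
//   store i8 %new, i8* %ptr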

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector if the src and destination have the same number
      // of elements, and adjust the vector mask to match the side where it
      // will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // Build an identity mask...
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // ...then override the positions that take their value from the
      // shuffled-in source.
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, an ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of this
      // struct follows gcc's behavior and conservatively makes it a non-ivar
      // write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If the cast is to a structure pointer, follow gcc's behavior and make
      // it a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
1487
1488 static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction & CGF,llvm::Value * V,llvm::Type * IRType,StringRef Name=StringRef ())1489 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1490 llvm::Value *V, llvm::Type *IRType,
1491 StringRef Name = StringRef()) {
1492 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1493 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1494 }
1495
EmitGlobalVarDeclLValue(CodeGenFunction & CGF,const Expr * E,const VarDecl * VD)1496 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1497 const Expr *E, const VarDecl *VD) {
1498 assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1499 "Var decl must have external storage or be a file var decl!");
1500
1501 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1502 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1503 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1504 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1505 QualType T = E->getType();
1506 LValue LV;
1507 if (VD->getType()->isReferenceType()) {
1508 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1509 LI->setAlignment(Alignment.getQuantity());
1510 V = LI;
1511 LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1512 } else {
1513 LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1514 }
1515 setObjCGCLValueClass(CGF.getContext(), E, LV);
1516 return LV;
1517 }
1518
EmitFunctionDeclLValue(CodeGenFunction & CGF,const Expr * E,const FunctionDecl * FD)1519 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1520 const Expr *E, const FunctionDecl *FD) {
1521 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1522 if (!FD->hasPrototype()) {
1523 if (const FunctionProtoType *Proto =
1524 FD->getType()->getAs<FunctionProtoType>()) {
1525 // Ugly case: for a K&R-style definition, the type of the definition
1526 // isn't the same as the type of a use. Correct for this with a
1527 // bitcast.
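      // Illustrative sketch (not from the source): given
      //   int f();              // use site: no-prototype type "int ()"
      //   int f(x) int x; { }   // K&R definition carries a prototype
      // the emitted function has the prototyped type, so its address must
      // be cast back to a pointer to the no-prototype type at this use.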
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool isBlockVariable = VD->hasAttr<BlocksAttr>();

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockVariable;

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);

    // Use special handling for lambdas.
    if (!V) {
      if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
        QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        return EmitLValueForField(LambdaLV, FD);
      }

      assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
      CharUnits alignment = getContext().getDeclAlign(VD);
      return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
                            E->getType(), alignment);
    }

    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (isBlockVariable)
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate a __weak write barrier on an indirect reference
    // to a pointer to object, as in "void foo(__weak id *param); *param = 0;"
    // But we continue to generate a __strong write barrier on an indirect
    // write into a pointer to object.
    if (getContext().getLangOpts().ObjC1 &&
        getContext().getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !cast<llvm::PointerType>(Addr->getType())
            ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}


LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type,
                                       CurDecl));

    llvm::Constant *C =
        CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}

llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the
  // function to the same call; that way, in the debugger, one can see which
  // operation did in fact fail. If we are optimizing, we collapse all calls
  // to trap down to just one per function to save on code size.
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
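/// For illustration (assumed example): given "int A[10];", the "A" in "A[i]"
/// appears here as a CK_ArrayToPointerDecay cast; returning the underlying
/// array lets the caller emit a single "gep A, 0, i" instead of two GEPs.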
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const CastExpr *CE = dyn_cast<CastExpr>(E);
  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
    return 0;

  // If this is a decay from a variable-width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return 0;

  return SubExpr;
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate. Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType(), LHS.getAlignment());
  }

  // Extend or truncate the index type to 32 or 64 bits.
  if (Idx->getType() != IntPtrTy)
    Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
                = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                     llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  CharUnits ArrayAlignment;
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address = EmitScalarExpr(E->getBase());

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
    }
  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
    // Indexing over an interface, as in "NSString *P; P[4];"
    llvm::Value *InterfaceSize =
        llvm::ConstantInt::get(Idx->getType(),
            getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    // The base must be a pointer, which is not an aggregate. Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    Address = EmitCastToVoidPtr(Base);
    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV = EmitLValue(Array);
    llvm::Value *ArrayPtr = ArrayLV.getAddress();
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    llvm::Value *Args[] = { Zero, Idx };

    // Propagate the alignment from the array itself to the result.
    ArrayAlignment = ArrayLV.getAlignment();

    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
  } else {
    // The base must be a pointer, which is not an aggregate. Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  // Limit the alignment to that of the result type.
  LValue LV;
  if (!ArrayAlignment.isZero()) {
    CharUnits Align = getContext().getTypeAlignInChars(T);
    ArrayAlignment = std::min(Align, ArrayAlignment);
    LV = MakeAddrLValue(Address, T, ArrayAlignment);
  } else {
    LV = MakeNaturalAlignAddrLValue(Address, T);
  }

  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());

  if (getContext().getLangOpts().ObjC1 &&
      getContext().getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

static
llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
                                       SmallVector<unsigned, 4> &Elts) {
  SmallVector<llvm::Constant*, 4> CElts;
  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(Builder.getInt32(Elts[i]));

  return llvm::ConstantVector::get(CElts);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue
    // with it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getAlignment());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
                                  Base.getAlignment());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  Expr *BaseExpr = E->getBase();

  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow())
    BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
                                        BaseExpr->getType()->getPointeeType());
  else
    BaseLV = EmitLValue(BaseExpr);

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
                                              const FieldDecl *Field,
                                              unsigned CVRQualifiers) {
  const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  return LValue::MakeBitfield(BaseValue, Info,
                              Field->getType().withCVRQualifiers(CVRQualifiers));
}

/// EmitLValueForAnonRecordField - Given that the field is a member of
/// an anonymous struct or union buried inside a record, and given
/// that the base value is a pointer to the enclosing record, derive
/// an lvalue for the ultimate field.
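/// A sketch of the case this handles (illustrative, not from the source):
///   struct S { struct { int x; }; } s;
/// Here "s.x" names an IndirectFieldDecl whose chain is the anonymous
/// struct followed by 'x'; we emit one EmitLValueForField step per link.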
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
                                                     const IndirectFieldDecl *Field,
                                                     unsigned CVRQualifiers) {
  IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
      IEnd = Field->chain_end();
  while (true) {
    QualType RecordTy =
        getContext().getTypeDeclType(cast<FieldDecl>(*I)->getParent());
    LValue LV = EmitLValueForField(MakeAddrLValue(BaseValue, RecordTy),
                                   cast<FieldDecl>(*I));
    if (++I == IEnd) return LV;

    assert(LV.isSimple());
    BaseValue = LV.getAddress();
    CVRQualifiers |= LV.getVRQualifiers();
  }
}

LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  if (field->isBitField())
    return EmitLValueForBitfield(base.getAddress(), field,
                                 base.getVRQualifiers());

  const RecordDecl *rec = field->getParent();
  QualType type = field->getType();
  CharUnits alignment = getContext().getDeclAlign(field);

  // FIXME: It should be impossible to have an LValue without alignment for a
  // complete type.
  if (!base.getAlignment().isZero())
    alignment = std::min(alignment, base.getAlignment());

  bool mayAlias = rec->hasAttr<MayAliasAttr>();

  llvm::Value *addr = base.getAddress();
  unsigned cvr = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    assert(!type->isReferenceType() && "union has reference member");
  } else {
    // For structs, we GEP to the field that the record layout suggests.
    unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
    addr = Builder.CreateStructGEP(addr, idx, field->getName());

    // If this is a reference field, load the reference right now.
    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
      if (cvr & Qualifiers::Volatile) load->setVolatile(true);
      load->setAlignment(alignment.getQuantity());

      if (CGM.shouldUseTBAA()) {
        llvm::MDNode *tbaa;
        if (mayAlias)
          tbaa = CGM.getTBAAInfo(getContext().CharTy);
        else
          tbaa = CGM.getTBAAInfo(type);
        CGM.DecorateInstruction(load, tbaa);
      }

      addr = load;
      mayAlias = false;
      type = refType->getPointeeType();
      if (type->isIncompleteType())
        alignment = CharUnits();
      else
        alignment = getContext().getTypeAlignInChars(type);
      cvr = 0; // qualifiers don't recursively apply to referencee
    }
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs. A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  addr = EmitBitCastOfLValueToProperType(*this, addr,
                                         CGM.getTypes().ConvertTypeForMem(type),
                                         field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, type, alignment);
  LV.getQuals().addCVRQualifiers(cvr);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  // Fields of may_alias structs act like 'char' for TBAA purposes.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  if (mayAlias && LV.getTBAAInfo())
    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs. A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());

  CharUnits Alignment = getContext().getDeclAlign(Field);

  // FIXME: It should be impossible to have an LValue without alignment for a
  // complete type.
  if (!Base.getAlignment().isZero())
    Alignment = std::min(Alignment, Base.getAlignment());

  return MakeAddrLValue(V, FieldType, Alignment);
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  if (E->isFileScope()) {
    llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType());
  }

  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType());

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  return Result;
}

LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert((hasAggregateLLVMType(expr->getType()) &&
            !expr->getType()->isAnyComplexType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);

  const Expr *condExpr = expr->getCond();
  bool CondExprBool;
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
    if (!CondExprBool) std::swap(live, dead);

    if (!ContainsLabel(dead))
      return EmitLValue(live);
  }

  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");

  ConditionalEvaluation eval(*this);
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);

  // Any temporaries created here are conditional.
  EmitBlock(lhsBlock);
  eval.begin(*this);
  LValue lhs = EmitLValue(expr->getTrueExpr());
  eval.end(*this);

  if (!lhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");

  lhsBlock = Builder.GetInsertBlock();
  Builder.CreateBr(contBlock);

  // Any temporaries created here are conditional.
  EmitBlock(rhsBlock);
  eval.begin(*this);
  LValue rhs = EmitLValue(expr->getFalseExpr());
  eval.end(*this);
  if (!rhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
                                         "cond-lvalue");
  phi->addIncoming(lhs.getAddress(), lhsBlock);
  phi->addIncoming(rhs.getAddress(), rhsBlock);
  return MakeAddrLValue(phi, expr->getType());
}

/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
/// If the cast is a dynamic_cast, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its fields. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
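/// Illustrative cases (a sketch): "(union U)i" (scalar-to-union) and
/// "dynamic_cast<Derived&>(base)" can both reach here in lvalue contexts.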
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  // These two casts are currently treated as no-ops, although they could
  // potentially be real operations depending on the target's ABI.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:

  case CK_NoOp:
  case CK_LValueToRValue:
    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
        || E->getType()->isRecordType())
      return EmitLValue(E->getSubExpr());
    // Fall through to synthesize a temporary.

  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject: {
    // These casts only produce lvalues when we're binding a reference to a
    // temporary realized from a (converted) pure rvalue. Emit the expression
    // as a value, copy it into a temporary, and return an lvalue referring to
    // that temporary.
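    // Illustration (assumed example): "const long &r = i;" with an int 'i'
    // converts via CK_IntegralCast; the converted value lands in "ref.temp"
    // below, and the reference binds to that temporary.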
    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
    return MakeAddrLValue(V, E->getType());
  }

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
        E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
        cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This = LV.getAddress();

    // Perform the derived-to-base conversion
    llvm::Value *Base =
        GetAddressOfBaseClass(This, DerivedClassDecl,
                              E->path_begin(), E->path_end(),
                              /*NullCheckValue=*/false);

    return MakeAddrLValue(Base, E->getType());
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
        cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    llvm::Value *Derived =
        GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                                 E->path_begin(), E->path_end(),
                                 /*NullCheckValue=*/false);

    return MakeAddrLValue(Derived, E->getType());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType ToType = getContext().getLValueReferenceType(E->getType());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(ToType));
    return MakeAddrLValue(V, E->getType());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitNullInitializationLValue(
    const CXXScalarValueInitExpr *E) {
  QualType Ty = E->getType();
  LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
  EmitNullInitialization(LV.getAddress(), Ty);
  return LV;
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}

LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  if (FT->isAnyComplexType())
    return RValue::getComplex(
        LoadComplexFromAddr(FieldLV.getAddress(),
                            FieldLV.isVolatileQualified()));
  else if (CodeGenFunction::hasAggregateLLVMType(FT))
    return FieldLV.asAggregateRValue();

  return EmitLoadOfLValue(FieldLV);
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, E->getLocStart());

  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E);
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const CXXPseudoDestructorExpr *PseudoDtor
          = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getContext().getLangOpts().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = NULL;
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       /*precise*/ true);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
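      // e.g. (hedged illustration) for "typedef int T; p->~T();" we simply
      // evaluate 'p' for its side effects and emit no destructor call.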
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.
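  // Hedged example: in "__block id x; x = f();", evaluating f() could copy a
  // block that captures 'x', moving 'x' to the heap, so the RHS must be
  // emitted before the LHS address is computed.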

  if (!hasAggregateLLVMType(E->getType())) {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitLValue(E->getLHS());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
      CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get an l-value for a statement expression returning aggregate
  // type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
      = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo =
      CGM.getTypes().arrangeFunctionCall(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
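  // Hedged illustration: for "void f(); ... f(1, 2.0);" the call is emitted
  // as if through "void (*)(int, double)", which is what the bitcast below
  // ("callee.knr.cast") produces.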
  if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
      = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
      CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
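    // Sketch of the IR shape this produces (simplified, assumed):
    //   %old = cmpxchg i32* %ptr, i32 %expected, i32 %desired <order>
    //   %ok  = icmp eq i32 %old, %expected
    // with %old stored back to the 'expected' slot and %ok to Dest.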
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
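  // e.g. (illustration) __atomic_add_fetch(p, n, o) returns old+n, whereas
  // __atomic_fetch_add(p, n, o) returns the old value; nand additionally
  // applies the CreateNot below so the result matches the stored ~(old & n).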
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
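  // e.g. (assumed illustration) a 16-byte _Atomic struct with 8-byte
  // alignment, or any atomic wider than the target's max inline width,
  // takes the libcall path below.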
2856
2857
2858
2859 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
2860 Ptr = EmitScalarExpr(E->getPtr());
2861
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getVal1()->getType()->isAnyComplexType()) {
      // Dispatch on the type of the initializer value; E->getType() is
      // void for __c11_atomic_init, so it would never test as complex.
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                          AtomicTy.getQualifiers(),
                                          AggValueSlot::IsNotDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument, so that its side effects
    // still occur.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
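      // For example (illustrative): with `_Atomic(int *) p`,
      // __c11_atomic_fetch_add(&p, 1, memory_order_seq_cst) must advance
      // the pointer by sizeof(int) bytes, whereas the GNU
      // __atomic_fetch_add on a pointer adds exactly the byte count given.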
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    CallArgList Args;
    // The size is always the first parameter.
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // The atomic address is always the second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char *LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
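      // The success ordering was just added; substitute the failure
      // ordering so the shared code below appends it as the trailing
      // 'order' parameter.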
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers. It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:  LibCallName = "__atomic_fetch_or_generic";  break;
    case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
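    // At this point the argument list matches libatomic's generic form.
    // For example (illustrative), a compare-exchange on a 32-byte type
    // becomes roughly:
    //   call zeroext i1 @__atomic_compare_exchange(i64 32, i8* %obj,
    //       i8* %expected, i8* %desired, i32 %success, i32 %failure)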

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFunctionCall(RetTy, Args,
                                           FunctionType::ExtInfo(),
                                           RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
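  // From here on the value is accessed as an integer of the same width,
  // e.g. (illustrative) an 8-byte _Atomic(double) is loaded and stored
  // through an i64*, since the LLVM atomic instructions are defined on
  // integer types.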

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default:  // invalid order
      // Normally we should never get here, but it is hard to enforce that
      // in general, so emit nothing for an invalid ordering.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
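  // An atomic load may not be release or acq_rel, and an atomic store may
  // not be consume, acquire, or acq_rel, so only create branch targets for
  // the orderings that are valid for the operation.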

  // Create all the relevant basic blocks.
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

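/// SetFPAccuracy - Attach !fpmath metadata to the given floating-point
/// instruction, recording the maximum permitted error in ulps. For example
/// (illustrative), SetFPAccuracy(Div, 2.5f) on a float division yields IR
/// like:
///   %div = fdiv float %x, %y, !fpmath !0
///   !0 = metadata !{float 2.500000e+00}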
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

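/// emitPseudoObjectExpr - Emit the semantic subexpressions of a
/// pseudo-object expression in order, binding each OpaqueValueExpr to the
/// value of its source expression, and produce the value (or lvalue) of
/// the result expression. For example (illustrative), an Objective-C
/// property increment such as `obj.count += 1` is a PseudoObjectExpr whose
/// semantic form contains opaque values for the base and the getter call.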
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}