1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
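// For illustration: a C11 expression such as
//   _Atomic(int) Counter;
//   int Old = atomic_fetch_add_explicit(&Counter, 1, memory_order_seq_cst);
// is lowered here either to a native LLVM atomic instruction (an atomicrmw in
// this case) when the target supports an inline atomic of that size and
// alignment, or to a call into the atomic support library (such as
// __atomic_fetch_add_4) otherwise.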
13 
14 #include "CodeGenFunction.h"
15 #include "CGCall.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenModule.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Operator.h"
24 
25 using namespace clang;
26 using namespace CodeGen;
27 
28 namespace {
29   class AtomicInfo {
30     CodeGenFunction &CGF;
31     QualType AtomicTy;
32     QualType ValueTy;
33     uint64_t AtomicSizeInBits;
34     uint64_t ValueSizeInBits;
35     CharUnits AtomicAlign;
36     CharUnits ValueAlign;
37     CharUnits LValueAlign;
38     TypeEvaluationKind EvaluationKind;
39     bool UseLibcall;
40     LValue LVal;
41     CGBitFieldInfo BFI;
42   public:
43     AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44         : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45           EvaluationKind(TEK_Scalar), UseLibcall(true) {
46       assert(!lvalue.isGlobalReg());
47       ASTContext &C = CGF.getContext();
48       if (lvalue.isSimple()) {
49         AtomicTy = lvalue.getType();
50         if (auto *ATy = AtomicTy->getAs<AtomicType>())
51           ValueTy = ATy->getValueType();
52         else
53           ValueTy = AtomicTy;
54         EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56         uint64_t ValueAlignInBits;
57         uint64_t AtomicAlignInBits;
58         TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59         ValueSizeInBits = ValueTI.Width;
60         ValueAlignInBits = ValueTI.Align;
61 
62         TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63         AtomicSizeInBits = AtomicTI.Width;
64         AtomicAlignInBits = AtomicTI.Align;
65 
66         assert(ValueSizeInBits <= AtomicSizeInBits);
67         assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69         AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70         ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71         if (lvalue.getAlignment().isZero())
72           lvalue.setAlignment(AtomicAlign);
73 
74         LVal = lvalue;
75       } else if (lvalue.isBitField()) {
76         ValueTy = lvalue.getType();
77         ValueSizeInBits = C.getTypeSize(ValueTy);
78         auto &OrigBFI = lvalue.getBitFieldInfo();
79         auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80         AtomicSizeInBits = C.toBits(
81             C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82                 .alignTo(lvalue.getAlignment()));
83         auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84         auto OffsetInChars =
85             (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86             lvalue.getAlignment();
87         VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88             VoidPtrAddr, OffsetInChars.getQuantity());
89         auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90             VoidPtrAddr,
91             CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92             "atomic_bitfield_base");
93         BFI = OrigBFI;
94         BFI.Offset = Offset;
95         BFI.StorageSize = AtomicSizeInBits;
96         BFI.StorageOffset += OffsetInChars;
97         LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98                                     BFI, lvalue.getType(),
99                                     lvalue.getAlignmentSource());
100         LVal.setTBAAInfo(lvalue.getTBAAInfo());
101         AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102         if (AtomicTy.isNull()) {
103           llvm::APInt Size(
104               /*numBits=*/32,
105               C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
106           AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
107                                             /*IndexTypeQuals=*/0);
108         }
109         AtomicAlign = ValueAlign = lvalue.getAlignment();
110       } else if (lvalue.isVectorElt()) {
111         ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
112         ValueSizeInBits = C.getTypeSize(ValueTy);
113         AtomicTy = lvalue.getType();
114         AtomicSizeInBits = C.getTypeSize(AtomicTy);
115         AtomicAlign = ValueAlign = lvalue.getAlignment();
116         LVal = lvalue;
117       } else {
118         assert(lvalue.isExtVectorElt());
119         ValueTy = lvalue.getType();
120         ValueSizeInBits = C.getTypeSize(ValueTy);
121         AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122             lvalue.getType(), lvalue.getExtVectorAddress()
123                                   .getElementType()->getVectorNumElements());
124         AtomicSizeInBits = C.getTypeSize(AtomicTy);
125         AtomicAlign = ValueAlign = lvalue.getAlignment();
126         LVal = lvalue;
127       }
128       UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
129           AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
130     }
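    // Illustrative example of the bit-field path above: given
    //   struct S { unsigned a : 3; unsigned b : 5; };
    // an atomic update of a bit-field like S::b operates on a whole, suitably
    // aligned integer containing the bit-field; the constructor rounds the
    // storage up to an addressable, aligned unit (BFI.StorageSize ==
    // AtomicSizeInBits) and rebases BFI.Offset within that unit, so the update
    // can be performed with a single integer atomic on that storage.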
131 
132     QualType getAtomicType() const { return AtomicTy; }
133     QualType getValueType() const { return ValueTy; }
134     CharUnits getAtomicAlignment() const { return AtomicAlign; }
135     CharUnits getValueAlignment() const { return ValueAlign; }
136     uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137     uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138     TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139     bool shouldUseLibcall() const { return UseLibcall; }
140     const LValue &getAtomicLValue() const { return LVal; }
141     llvm::Value *getAtomicPointer() const {
142       if (LVal.isSimple())
143         return LVal.getPointer();
144       else if (LVal.isBitField())
145         return LVal.getBitFieldPointer();
146       else if (LVal.isVectorElt())
147         return LVal.getVectorPointer();
148       assert(LVal.isExtVectorElt());
149       return LVal.getExtVectorPointer();
150     }
151     Address getAtomicAddress() const {
152       return Address(getAtomicPointer(), getAtomicAlignment());
153     }
154 
155     Address getAtomicAddressAsAtomicIntPointer() const {
156       return emitCastToAtomicIntPointer(getAtomicAddress());
157     }
158 
159     /// Is the atomic size larger than the underlying value type?
160     ///
161     /// Note that the absence of padding does not mean that atomic
162     /// objects are completely interchangeable with non-atomic
163     /// objects: we might have promoted the alignment of a type
164     /// without making it bigger.
165     bool hasPadding() const {
166       return (ValueSizeInBits != AtomicSizeInBits);
167     }
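    // Illustrative example (typical targets): for a 3-byte aggregate such as
    //   struct Pack { char c[3]; };
    // an _Atomic(struct Pack) object is promoted to 4 bytes with 4-byte
    // alignment, so ValueSizeInBits is 24, AtomicSizeInBits is 32, and
    // hasPadding() is true; for _Atomic(int) the two sizes match and
    // hasPadding() is false.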
168 
169     bool emitMemSetZeroIfNecessary() const;
170 
171     llvm::Value *getAtomicSizeValue() const {
172       CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173       return CGF.CGM.getSize(size);
174     }
175 
176     /// Cast the given pointer to an integer pointer suitable for atomic
177     /// operations.
178     Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180     /// If Addr is compatible with the iN that will be used for an atomic
181     /// operation, bitcast it. Otherwise, create a temporary that is suitable
182     /// and copy the value across.
183     Address convertToAtomicIntPointer(Address Addr) const;
184 
185     /// Turn an atomic-layout object into an r-value.
186     RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187                                      SourceLocation loc, bool AsValue) const;
188 
189     /// \brief Converts an r-value to an integer value.
190     llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192     RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193                                      AggValueSlot ResultSlot,
194                                      SourceLocation Loc, bool AsValue) const;
195 
196     /// Copy an atomic r-value into atomic-layout memory.
197     void emitCopyIntoMemory(RValue rvalue) const;
198 
199     /// Project an l-value down to the value field.
200     LValue projectValue() const {
201       assert(LVal.isSimple());
202       Address addr = getAtomicAddress();
203       if (hasPadding())
204         addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
205 
206       return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207                               LVal.getAlignmentSource(), LVal.getTBAAInfo());
208     }
209 
210     /// \brief Emits atomic load.
211     /// \returns Loaded value.
212     RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213                           bool AsValue, llvm::AtomicOrdering AO,
214                           bool IsVolatile);
215 
216     /// \brief Emits atomic compare-and-exchange sequence.
217     /// \param Expected Expected value.
218     /// \param Desired Desired value.
219     /// \param Success Atomic ordering for success operation.
220     /// \param Failure Atomic ordering for failed operation.
221     /// \param IsWeak true if atomic operation is weak, false otherwise.
222     /// \returns Pair of values: previous value from storage (value type) and
223     /// boolean flag (i1 type) with true if success and false otherwise.
224     std::pair<RValue, llvm::Value *>
225     EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226                               llvm::AtomicOrdering Success =
227                                   llvm::AtomicOrdering::SequentiallyConsistent,
228                               llvm::AtomicOrdering Failure =
229                                   llvm::AtomicOrdering::SequentiallyConsistent,
230                               bool IsWeak = false);
231 
232     /// \brief Emits atomic update.
233     /// \param AO Atomic ordering.
234     /// \param UpdateOp Update operation for the current lvalue.
235     void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236                           const llvm::function_ref<RValue(RValue)> &UpdateOp,
237                           bool IsVolatile);
238     /// \brief Emits atomic update.
239     /// \param AO Atomic ordering.
240     void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241                           bool IsVolatile);
242 
243     /// Materialize an atomic r-value in atomic-layout memory.
244     Address materializeRValue(RValue rvalue) const;
245 
246     /// \brief Creates a temporary alloca for intermediate operations on the atomic value.
247     Address CreateTempAlloca() const;
248   private:
249     bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252     /// \brief Emits atomic load as a libcall.
253     void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
254                                llvm::AtomicOrdering AO, bool IsVolatile);
255     /// \brief Emits atomic load as LLVM instruction.
256     llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257     /// \brief Emits atomic compare-and-exchange op as a libcall.
258     llvm::Value *EmitAtomicCompareExchangeLibcall(
259         llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260         llvm::AtomicOrdering Success =
261             llvm::AtomicOrdering::SequentiallyConsistent,
262         llvm::AtomicOrdering Failure =
263             llvm::AtomicOrdering::SequentiallyConsistent);
264     /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
265     std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266         llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267         llvm::AtomicOrdering Success =
268             llvm::AtomicOrdering::SequentiallyConsistent,
269         llvm::AtomicOrdering Failure =
270             llvm::AtomicOrdering::SequentiallyConsistent,
271         bool IsWeak = false);
272     /// \brief Emit atomic update as libcalls.
273     void
274     EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
276                             bool IsVolatile);
277     /// \brief Emit atomic update as LLVM instructions.
278     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279                             const llvm::function_ref<RValue(RValue)> &UpdateOp,
280                             bool IsVolatile);
281     /// \brief Emit atomic update as libcalls.
282     void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283                                  bool IsVolatile);
284     /// \brief Emit atomic update as LLVM instructions.
285     void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
286                             bool IsVolatile);
287   };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291   Address TempAlloca = CGF.CreateMemTemp(
292       (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293                                                                 : AtomicTy,
294       getAtomicAlignment(),
295       "atomic-temp");
296   // Cast to pointer to value type for bitfields.
297   if (LVal.isBitField())
298     return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299         TempAlloca, getAtomicAddress().getType());
300   return TempAlloca;
301 }
302 
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304                                 StringRef fnName,
305                                 QualType resultType,
306                                 CallArgList &args) {
307   const CGFunctionInfo &fnInfo =
308     CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309   llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310   llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
311   return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
312 }
313 
314 /// Does a store of the given IR type modify the full expected width?
315 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
316                            uint64_t expectedSize) {
317   return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
318 }
319 
320 /// Does the atomic type require memsetting to zero before initialization?
321 ///
322 /// The IR type is provided as a way of making certain queries faster.
323 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
324   // If the atomic type has size padding, we definitely need a memset.
325   if (hasPadding()) return true;
326 
327   // Otherwise, do some simple heuristics to try to avoid it:
328   switch (getEvaluationKind()) {
329   // For scalars and complexes, check whether the store size of the
330   // type uses the full size.
331   case TEK_Scalar:
332     return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
333   case TEK_Complex:
334     return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
335                            AtomicSizeInBits / 2);
336 
337   // Padding in structs has an undefined bit pattern.  User beware.
338   case TEK_Aggregate:
339     return false;
340   }
341   llvm_unreachable("bad evaluation kind");
342 }
343 
344 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
345   assert(LVal.isSimple());
346   llvm::Value *addr = LVal.getPointer();
347   if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
348     return false;
349 
350   CGF.Builder.CreateMemSet(
351       addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
352       CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
353       LVal.getAlignment().getQuantity());
354   return true;
355 }
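// Illustrative example (target-dependent): on x86-64, _Atomic(long double) is
// 16 bytes, but a store of the underlying x86_fp80 value only writes 10 of
// them, so isFullSizeType() is false and the object is zeroed first; this
// keeps the padding bytes in a known state so that later compare-and-exchange
// based operations compare predictably.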
356 
357 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
358                               Address Dest, Address Ptr,
359                               Address Val1, Address Val2,
360                               uint64_t Size,
361                               llvm::AtomicOrdering SuccessOrder,
362                               llvm::AtomicOrdering FailureOrder) {
363   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
364   llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
365   llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
366 
367   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
368       Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
369   Pair->setVolatile(E->isVolatile());
370   Pair->setWeak(IsWeak);
371 
372   // Cmp holds the result of the compare-exchange operation: true on success,
373   // false on failure.
374   llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
375   llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
376 
377   // This basic block is used to hold the store instruction if the operation
378   // failed.
379   llvm::BasicBlock *StoreExpectedBB =
380       CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
381 
382   // This basic block is the exit point of the operation, we should end up
383   // here regardless of whether or not the operation succeeded.
384   llvm::BasicBlock *ContinueBB =
385       CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
386 
387   // Update Expected if Expected isn't equal to Old, otherwise branch to the
388   // exit point.
389   CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
390 
391   CGF.Builder.SetInsertPoint(StoreExpectedBB);
392   // Update the memory at Expected with Old's value.
393   CGF.Builder.CreateStore(Old, Val1);
394   // Finally, branch to the exit point.
395   CGF.Builder.CreateBr(ContinueBB);
396 
397   CGF.Builder.SetInsertPoint(ContinueBB);
398   // Update the memory at Dest with Cmp's value.
399   CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
400 }
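// Roughly, for a 4-byte object the sequence above produces IR of this shape
// (a sketch; names and orderings vary):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired <success> <failure>
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// with the observed value written back to the 'expected' slot on failure and
// the i1 result stored to Dest in the continue block.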
401 
402 /// Given an ordering required on success, emit all possible cmpxchg
403 /// instructions to cope with the provided (but possibly only dynamically known)
404 /// FailureOrder.
405 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
406                                         bool IsWeak, Address Dest, Address Ptr,
407                                         Address Val1, Address Val2,
408                                         llvm::Value *FailureOrderVal,
409                                         uint64_t Size,
410                                         llvm::AtomicOrdering SuccessOrder) {
411   llvm::AtomicOrdering FailureOrder;
412   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
413     auto FOS = FO->getSExtValue();
414     if (!llvm::isValidAtomicOrderingCABI(FOS))
415       FailureOrder = llvm::AtomicOrdering::Monotonic;
416     else
417       switch ((llvm::AtomicOrderingCABI)FOS) {
418       case llvm::AtomicOrderingCABI::relaxed:
419       case llvm::AtomicOrderingCABI::release:
420       case llvm::AtomicOrderingCABI::acq_rel:
421         FailureOrder = llvm::AtomicOrdering::Monotonic;
422         break;
423       case llvm::AtomicOrderingCABI::consume:
424       case llvm::AtomicOrderingCABI::acquire:
425         FailureOrder = llvm::AtomicOrdering::Acquire;
426         break;
427       case llvm::AtomicOrderingCABI::seq_cst:
428         FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
429         break;
430       }
431     if (isStrongerThan(FailureOrder, SuccessOrder)) {
432       // Don't assert on undefined behavior "failure argument shall be no
433       // stronger than the success argument".
434       FailureOrder =
435           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
436     }
437     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
438                       FailureOrder);
439     return;
440   }
441 
442   // Create all the relevant BB's
443   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
444                    *SeqCstBB = nullptr;
445   MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
446   if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
447       SuccessOrder != llvm::AtomicOrdering::Release)
448     AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
449   if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
450     SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
451 
452   llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
453 
454   llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
455 
456   // Emit all the different atomics
457 
458   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
459   // doesn't matter unless someone is crazy enough to use something that
460   // doesn't fold to a constant for the ordering.
461   CGF.Builder.SetInsertPoint(MonotonicBB);
462   emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
463                     Size, SuccessOrder, llvm::AtomicOrdering::Monotonic);
464   CGF.Builder.CreateBr(ContBB);
465 
466   if (AcquireBB) {
467     CGF.Builder.SetInsertPoint(AcquireBB);
468     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
469                       Size, SuccessOrder, llvm::AtomicOrdering::Acquire);
470     CGF.Builder.CreateBr(ContBB);
471     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
472                 AcquireBB);
473     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
474                 AcquireBB);
475   }
476   if (SeqCstBB) {
477     CGF.Builder.SetInsertPoint(SeqCstBB);
478     emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
479                       llvm::AtomicOrdering::SequentiallyConsistent);
480     CGF.Builder.CreateBr(ContBB);
481     SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
482                 SeqCstBB);
483   }
484 
485   CGF.Builder.SetInsertPoint(ContBB);
486 }
487 
488 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
489                          Address Ptr, Address Val1, Address Val2,
490                          llvm::Value *IsWeak, llvm::Value *FailureOrder,
491                          uint64_t Size, llvm::AtomicOrdering Order) {
492   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
493   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
494 
495   switch (E->getOp()) {
496   case AtomicExpr::AO__c11_atomic_init:
497     llvm_unreachable("Already handled!");
498 
499   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
500     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
501                                 FailureOrder, Size, Order);
502     return;
503   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
504     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
505                                 FailureOrder, Size, Order);
506     return;
507   case AtomicExpr::AO__atomic_compare_exchange:
508   case AtomicExpr::AO__atomic_compare_exchange_n: {
509     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
510       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
511                                   Val1, Val2, FailureOrder, Size, Order);
512     } else {
513       // Create all the relevant BB's
514       llvm::BasicBlock *StrongBB =
515           CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
516       llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
517       llvm::BasicBlock *ContBB =
518           CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
519 
520       llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
521       SI->addCase(CGF.Builder.getInt1(false), StrongBB);
522 
523       CGF.Builder.SetInsertPoint(StrongBB);
524       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
525                                   FailureOrder, Size, Order);
526       CGF.Builder.CreateBr(ContBB);
527 
528       CGF.Builder.SetInsertPoint(WeakBB);
529       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
530                                   FailureOrder, Size, Order);
531       CGF.Builder.CreateBr(ContBB);
532 
533       CGF.Builder.SetInsertPoint(ContBB);
534     }
535     return;
536   }
537   case AtomicExpr::AO__c11_atomic_load:
538   case AtomicExpr::AO__atomic_load_n:
539   case AtomicExpr::AO__atomic_load: {
540     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
541     Load->setAtomic(Order);
542     Load->setVolatile(E->isVolatile());
543     CGF.Builder.CreateStore(Load, Dest);
544     return;
545   }
546 
547   case AtomicExpr::AO__c11_atomic_store:
548   case AtomicExpr::AO__atomic_store:
549   case AtomicExpr::AO__atomic_store_n: {
550     llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
551     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
552     Store->setAtomic(Order);
553     Store->setVolatile(E->isVolatile());
554     return;
555   }
556 
557   case AtomicExpr::AO__c11_atomic_exchange:
558   case AtomicExpr::AO__atomic_exchange_n:
559   case AtomicExpr::AO__atomic_exchange:
560     Op = llvm::AtomicRMWInst::Xchg;
561     break;
562 
563   case AtomicExpr::AO__atomic_add_fetch:
564     PostOp = llvm::Instruction::Add;
565     // Fall through.
566   case AtomicExpr::AO__c11_atomic_fetch_add:
567   case AtomicExpr::AO__atomic_fetch_add:
568     Op = llvm::AtomicRMWInst::Add;
569     break;
570 
571   case AtomicExpr::AO__atomic_sub_fetch:
572     PostOp = llvm::Instruction::Sub;
573     // Fall through.
574   case AtomicExpr::AO__c11_atomic_fetch_sub:
575   case AtomicExpr::AO__atomic_fetch_sub:
576     Op = llvm::AtomicRMWInst::Sub;
577     break;
578 
579   case AtomicExpr::AO__atomic_and_fetch:
580     PostOp = llvm::Instruction::And;
581     // Fall through.
582   case AtomicExpr::AO__c11_atomic_fetch_and:
583   case AtomicExpr::AO__atomic_fetch_and:
584     Op = llvm::AtomicRMWInst::And;
585     break;
586 
587   case AtomicExpr::AO__atomic_or_fetch:
588     PostOp = llvm::Instruction::Or;
589     // Fall through.
590   case AtomicExpr::AO__c11_atomic_fetch_or:
591   case AtomicExpr::AO__atomic_fetch_or:
592     Op = llvm::AtomicRMWInst::Or;
593     break;
594 
595   case AtomicExpr::AO__atomic_xor_fetch:
596     PostOp = llvm::Instruction::Xor;
597     // Fall through.
598   case AtomicExpr::AO__c11_atomic_fetch_xor:
599   case AtomicExpr::AO__atomic_fetch_xor:
600     Op = llvm::AtomicRMWInst::Xor;
601     break;
602 
603   case AtomicExpr::AO__atomic_nand_fetch:
604     PostOp = llvm::Instruction::And; // the NOT is special cased below
605   // Fall through.
606   case AtomicExpr::AO__atomic_fetch_nand:
607     Op = llvm::AtomicRMWInst::Nand;
608     break;
609   }
610 
611   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
612   llvm::AtomicRMWInst *RMWI =
613       CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order);
614   RMWI->setVolatile(E->isVolatile());
615 
616   // For __atomic_*_fetch operations, perform the operation again to
617   // determine the value which was written.
618   llvm::Value *Result = RMWI;
619   if (PostOp)
620     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
621   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
622     Result = CGF.Builder.CreateNot(Result);
623   CGF.Builder.CreateStore(Result, Dest);
624 }
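// For example, __atomic_add_fetch(p, n, order) is emitted as an atomicrmw add
// (which yields the old value) followed by a plain add that recomputes the new
// value the builtin must return; __atomic_nand_fetch additionally applies the
// final 'not', since the atomicrmw returns the pre-operation contents.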
625 
626 // This function emits any expression (scalar, complex, or aggregate)
627 // into a temporary alloca.
628 static Address
629 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
630   Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
631   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
632                        /*Init*/ true);
633   return DeclPtr;
634 }
635 
636 static void
637 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
638                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
639                   SourceLocation Loc, CharUnits SizeInChars) {
640   if (UseOptimizedLibcall) {
641     // Load value and pass it to the function directly.
642     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
643     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
644     ValTy =
645         CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
646     llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
647                                                 SizeInBits)->getPointerTo();
648     Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
649     Val = CGF.EmitLoadOfScalar(Ptr, false,
650                                CGF.getContext().getPointerType(ValTy),
651                                Loc);
652     // Coerce the value into an appropriately sized integer type.
653     Args.add(RValue::get(Val), ValTy);
654   } else {
655     // Non-optimized functions always take a reference.
656     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
657                          CGF.getContext().VoidPtrTy);
658   }
659 }
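// For example, with the optimized libcalls the value is passed directly, as in
//   __atomic_store_4(ptr, val, order);
// whereas the generic form takes the value by reference:
//   __atomic_store(size, ptr, &val, order);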
660 
661 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
662   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
663   QualType MemTy = AtomicTy;
664   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
665     MemTy = AT->getValueType();
666   CharUnits sizeChars, alignChars;
667   std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
668   uint64_t Size = sizeChars.getQuantity();
669   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
670   bool UseLibcall = (sizeChars != alignChars ||
671                      getContext().toBits(sizeChars) > MaxInlineWidthInBits);
672 
673   llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
674 
675   Address Val1 = Address::invalid();
676   Address Val2 = Address::invalid();
677   Address Dest = Address::invalid();
678   Address Ptr(EmitScalarExpr(E->getPtr()), alignChars);
679 
680   if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
681     LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
682     EmitAtomicInit(E->getVal1(), lvalue);
683     return RValue::get(nullptr);
684   }
685 
686   llvm::Value *Order = EmitScalarExpr(E->getOrder());
687 
688   switch (E->getOp()) {
689   case AtomicExpr::AO__c11_atomic_init:
690     llvm_unreachable("Already handled above with EmitAtomicInit!");
691 
692   case AtomicExpr::AO__c11_atomic_load:
693   case AtomicExpr::AO__atomic_load_n:
694     break;
695 
696   case AtomicExpr::AO__atomic_load:
697     Dest = EmitPointerWithAlignment(E->getVal1());
698     break;
699 
700   case AtomicExpr::AO__atomic_store:
701     Val1 = EmitPointerWithAlignment(E->getVal1());
702     break;
703 
704   case AtomicExpr::AO__atomic_exchange:
705     Val1 = EmitPointerWithAlignment(E->getVal1());
706     Dest = EmitPointerWithAlignment(E->getVal2());
707     break;
708 
709   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
710   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
711   case AtomicExpr::AO__atomic_compare_exchange_n:
712   case AtomicExpr::AO__atomic_compare_exchange:
713     Val1 = EmitPointerWithAlignment(E->getVal1());
714     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
715       Val2 = EmitPointerWithAlignment(E->getVal2());
716     else
717       Val2 = EmitValToTemp(*this, E->getVal2());
718     OrderFail = EmitScalarExpr(E->getOrderFail());
719     if (E->getNumSubExprs() == 6)
720       IsWeak = EmitScalarExpr(E->getWeak());
721     break;
722 
723   case AtomicExpr::AO__c11_atomic_fetch_add:
724   case AtomicExpr::AO__c11_atomic_fetch_sub:
725     if (MemTy->isPointerType()) {
726       // For pointer arithmetic, we're required to do a bit of math:
727       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
728       // ... but only for the C11 builtins. The GNU builtins expect the
729       // user to multiply by sizeof(T).
730       QualType Val1Ty = E->getVal1()->getType();
731       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
732       CharUnits PointeeIncAmt =
733           getContext().getTypeSizeInChars(MemTy->getPointeeType());
734       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
735       auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
736       Val1 = Temp;
737       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
738       break;
739     }
740     // Fall through.
741   case AtomicExpr::AO__atomic_fetch_add:
742   case AtomicExpr::AO__atomic_fetch_sub:
743   case AtomicExpr::AO__atomic_add_fetch:
744   case AtomicExpr::AO__atomic_sub_fetch:
745   case AtomicExpr::AO__c11_atomic_store:
746   case AtomicExpr::AO__c11_atomic_exchange:
747   case AtomicExpr::AO__atomic_store_n:
748   case AtomicExpr::AO__atomic_exchange_n:
749   case AtomicExpr::AO__c11_atomic_fetch_and:
750   case AtomicExpr::AO__c11_atomic_fetch_or:
751   case AtomicExpr::AO__c11_atomic_fetch_xor:
752   case AtomicExpr::AO__atomic_fetch_and:
753   case AtomicExpr::AO__atomic_fetch_or:
754   case AtomicExpr::AO__atomic_fetch_xor:
755   case AtomicExpr::AO__atomic_fetch_nand:
756   case AtomicExpr::AO__atomic_and_fetch:
757   case AtomicExpr::AO__atomic_or_fetch:
758   case AtomicExpr::AO__atomic_xor_fetch:
759   case AtomicExpr::AO__atomic_nand_fetch:
760     Val1 = EmitValToTemp(*this, E->getVal1());
761     break;
762   }
763 
764   QualType RValTy = E->getType().getUnqualifiedType();
765 
766   // The inline atomics only work on iN types, where N is a power of 2. We
767   // need to make sure (via temporaries if necessary) that all incoming values
768   // are compatible.
769   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
770   AtomicInfo Atomics(*this, AtomicVal);
771 
772   Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
773   if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
774   if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
775   if (Dest.isValid())
776     Dest = Atomics.emitCastToAtomicIntPointer(Dest);
777   else if (E->isCmpXChg())
778     Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
779   else if (!RValTy->isVoidType())
780     Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
781 
782   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
783   if (UseLibcall) {
784     bool UseOptimizedLibcall = false;
785     switch (E->getOp()) {
786     case AtomicExpr::AO__c11_atomic_init:
787       llvm_unreachable("Already handled above with EmitAtomicInit!");
788 
789     case AtomicExpr::AO__c11_atomic_fetch_add:
790     case AtomicExpr::AO__atomic_fetch_add:
791     case AtomicExpr::AO__c11_atomic_fetch_and:
792     case AtomicExpr::AO__atomic_fetch_and:
793     case AtomicExpr::AO__c11_atomic_fetch_or:
794     case AtomicExpr::AO__atomic_fetch_or:
795     case AtomicExpr::AO__atomic_fetch_nand:
796     case AtomicExpr::AO__c11_atomic_fetch_sub:
797     case AtomicExpr::AO__atomic_fetch_sub:
798     case AtomicExpr::AO__c11_atomic_fetch_xor:
799     case AtomicExpr::AO__atomic_fetch_xor:
800     case AtomicExpr::AO__atomic_add_fetch:
801     case AtomicExpr::AO__atomic_and_fetch:
802     case AtomicExpr::AO__atomic_nand_fetch:
803     case AtomicExpr::AO__atomic_or_fetch:
804     case AtomicExpr::AO__atomic_sub_fetch:
805     case AtomicExpr::AO__atomic_xor_fetch:
806       // For these, only library calls for certain sizes exist.
807       UseOptimizedLibcall = true;
808       break;
809 
810     case AtomicExpr::AO__c11_atomic_load:
811     case AtomicExpr::AO__c11_atomic_store:
812     case AtomicExpr::AO__c11_atomic_exchange:
813     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
814     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
815     case AtomicExpr::AO__atomic_load_n:
816     case AtomicExpr::AO__atomic_load:
817     case AtomicExpr::AO__atomic_store_n:
818     case AtomicExpr::AO__atomic_store:
819     case AtomicExpr::AO__atomic_exchange_n:
820     case AtomicExpr::AO__atomic_exchange:
821     case AtomicExpr::AO__atomic_compare_exchange_n:
822     case AtomicExpr::AO__atomic_compare_exchange:
823       // Only use optimized library calls for sizes for which they exist.
824       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
825         UseOptimizedLibcall = true;
826       break;
827     }
828 
829     CallArgList Args;
830     if (!UseOptimizedLibcall) {
831       // For non-optimized library calls, the size is the first parameter
832       Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
833                getContext().getSizeType());
834     }
835     // Atomic address is the first or second parameter
836     Args.add(RValue::get(EmitCastToVoidPtr(Ptr.getPointer())),
837              getContext().VoidPtrTy);
838 
839     std::string LibCallName;
840     QualType LoweredMemTy =
841       MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
842     QualType RetTy;
843     bool HaveRetTy = false;
844     llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
845     switch (E->getOp()) {
846     case AtomicExpr::AO__c11_atomic_init:
847       llvm_unreachable("Already handled!");
848 
849     // There is only one libcall for compare and exchange, because there is no
850     // optimization benefit possible from a libcall version of a weak compare
851     // and exchange.
852     // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
853     //                                void *desired, int success, int failure)
854     // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
855     //                                  int success, int failure)
856     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
857     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
858     case AtomicExpr::AO__atomic_compare_exchange:
859     case AtomicExpr::AO__atomic_compare_exchange_n:
860       LibCallName = "__atomic_compare_exchange";
861       RetTy = getContext().BoolTy;
862       HaveRetTy = true;
863       Args.add(RValue::get(EmitCastToVoidPtr(Val1.getPointer())),
864                getContext().VoidPtrTy);
865       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
866                         MemTy, E->getExprLoc(), sizeChars);
867       Args.add(RValue::get(Order), getContext().IntTy);
868       Order = OrderFail;
869       break;
870     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
871     //                        int order)
872     // T __atomic_exchange_N(T *mem, T val, int order)
873     case AtomicExpr::AO__c11_atomic_exchange:
874     case AtomicExpr::AO__atomic_exchange_n:
875     case AtomicExpr::AO__atomic_exchange:
876       LibCallName = "__atomic_exchange";
877       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
878                         MemTy, E->getExprLoc(), sizeChars);
879       break;
880     // void __atomic_store(size_t size, void *mem, void *val, int order)
881     // void __atomic_store_N(T *mem, T val, int order)
882     case AtomicExpr::AO__c11_atomic_store:
883     case AtomicExpr::AO__atomic_store:
884     case AtomicExpr::AO__atomic_store_n:
885       LibCallName = "__atomic_store";
886       RetTy = getContext().VoidTy;
887       HaveRetTy = true;
888       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
889                         MemTy, E->getExprLoc(), sizeChars);
890       break;
891     // void __atomic_load(size_t size, void *mem, void *return, int order)
892     // T __atomic_load_N(T *mem, int order)
893     case AtomicExpr::AO__c11_atomic_load:
894     case AtomicExpr::AO__atomic_load:
895     case AtomicExpr::AO__atomic_load_n:
896       LibCallName = "__atomic_load";
897       break;
898     // T __atomic_add_fetch_N(T *mem, T val, int order)
899     // T __atomic_fetch_add_N(T *mem, T val, int order)
900     case AtomicExpr::AO__atomic_add_fetch:
901       PostOp = llvm::Instruction::Add;
902     // Fall through.
903     case AtomicExpr::AO__c11_atomic_fetch_add:
904     case AtomicExpr::AO__atomic_fetch_add:
905       LibCallName = "__atomic_fetch_add";
906       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
907                         LoweredMemTy, E->getExprLoc(), sizeChars);
908       break;
909     // T __atomic_and_fetch_N(T *mem, T val, int order)
910     // T __atomic_fetch_and_N(T *mem, T val, int order)
911     case AtomicExpr::AO__atomic_and_fetch:
912       PostOp = llvm::Instruction::And;
913     // Fall through.
914     case AtomicExpr::AO__c11_atomic_fetch_and:
915     case AtomicExpr::AO__atomic_fetch_and:
916       LibCallName = "__atomic_fetch_and";
917       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
918                         MemTy, E->getExprLoc(), sizeChars);
919       break;
920     // T __atomic_or_fetch_N(T *mem, T val, int order)
921     // T __atomic_fetch_or_N(T *mem, T val, int order)
922     case AtomicExpr::AO__atomic_or_fetch:
923       PostOp = llvm::Instruction::Or;
924     // Fall through.
925     case AtomicExpr::AO__c11_atomic_fetch_or:
926     case AtomicExpr::AO__atomic_fetch_or:
927       LibCallName = "__atomic_fetch_or";
928       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
929                         MemTy, E->getExprLoc(), sizeChars);
930       break;
931     // T __atomic_sub_fetch_N(T *mem, T val, int order)
932     // T __atomic_fetch_sub_N(T *mem, T val, int order)
933     case AtomicExpr::AO__atomic_sub_fetch:
934       PostOp = llvm::Instruction::Sub;
935     // Fall through.
936     case AtomicExpr::AO__c11_atomic_fetch_sub:
937     case AtomicExpr::AO__atomic_fetch_sub:
938       LibCallName = "__atomic_fetch_sub";
939       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
940                         LoweredMemTy, E->getExprLoc(), sizeChars);
941       break;
942     // T __atomic_xor_fetch_N(T *mem, T val, int order)
943     // T __atomic_fetch_xor_N(T *mem, T val, int order)
944     case AtomicExpr::AO__atomic_xor_fetch:
945       PostOp = llvm::Instruction::Xor;
946     // Fall through.
947     case AtomicExpr::AO__c11_atomic_fetch_xor:
948     case AtomicExpr::AO__atomic_fetch_xor:
949       LibCallName = "__atomic_fetch_xor";
950       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
951                         MemTy, E->getExprLoc(), sizeChars);
952       break;
953     // T __atomic_nand_fetch_N(T *mem, T val, int order)
954     // T __atomic_fetch_nand_N(T *mem, T val, int order)
955     case AtomicExpr::AO__atomic_nand_fetch:
956       PostOp = llvm::Instruction::And; // the NOT is special cased below
957     // Fall through.
958     case AtomicExpr::AO__atomic_fetch_nand:
959       LibCallName = "__atomic_fetch_nand";
960       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
961                         MemTy, E->getExprLoc(), sizeChars);
962       break;
963     }
964 
965     // Optimized functions have the size in their name.
966     if (UseOptimizedLibcall)
967       LibCallName += "_" + llvm::utostr(Size);
968     // By default, assume we return a value of the atomic type.
969     if (!HaveRetTy) {
970       if (UseOptimizedLibcall) {
971         // Value is returned directly.
972         // The function returns an appropriately sized integer type.
973         RetTy = getContext().getIntTypeForBitwidth(
974             getContext().toBits(sizeChars), /*Signed=*/false);
975       } else {
976         // Value is returned through parameter before the order.
977         RetTy = getContext().VoidTy;
978         Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
979                  getContext().VoidPtrTy);
980       }
981     }
982     // order is always the last parameter
983     Args.add(RValue::get(Order),
984              getContext().IntTy);
985 
986     // PostOp is only needed for the atomic_*_fetch operations, and
987     // thus is only needed for and implemented in the
988     // UseOptimizedLibcall codepath.
989     assert(UseOptimizedLibcall || !PostOp);
990 
991     RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
992     // The value is returned directly from the libcall.
993     if (E->isCmpXChg())
994       return Res;
995 
996     // The value is returned directly for optimized libcalls but the expr
997     // provided an out-param.
998     if (UseOptimizedLibcall && Res.getScalarVal()) {
999       llvm::Value *ResVal = Res.getScalarVal();
1000       if (PostOp) {
1001         llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
1002         ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1003       }
1004       if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1005         ResVal = Builder.CreateNot(ResVal);
1006 
1007       Builder.CreateStore(
1008           ResVal,
1009           Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1010     }
1011 
1012     if (RValTy->isVoidType())
1013       return RValue::get(nullptr);
1014 
1015     return convertTempToRValue(
1016         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1017         RValTy, E->getExprLoc());
1018   }
1019 
1020   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1021                  E->getOp() == AtomicExpr::AO__atomic_store ||
1022                  E->getOp() == AtomicExpr::AO__atomic_store_n;
1023   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1024                 E->getOp() == AtomicExpr::AO__atomic_load ||
1025                 E->getOp() == AtomicExpr::AO__atomic_load_n;
1026 
1027   if (isa<llvm::ConstantInt>(Order)) {
1028     auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1029     // We should not ever get to a case where the ordering isn't a valid C ABI
1030     // value, but it's hard to enforce that in general.
1031     if (llvm::isValidAtomicOrderingCABI(ord))
1032       switch ((llvm::AtomicOrderingCABI)ord) {
1033       case llvm::AtomicOrderingCABI::relaxed:
1034         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1035                      llvm::AtomicOrdering::Monotonic);
1036         break;
1037       case llvm::AtomicOrderingCABI::consume:
1038       case llvm::AtomicOrderingCABI::acquire:
1039         if (IsStore)
1040           break; // Avoid crashing on code with undefined behavior
1041         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1042                      llvm::AtomicOrdering::Acquire);
1043         break;
1044       case llvm::AtomicOrderingCABI::release:
1045         if (IsLoad)
1046           break; // Avoid crashing on code with undefined behavior
1047         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1048                      llvm::AtomicOrdering::Release);
1049         break;
1050       case llvm::AtomicOrderingCABI::acq_rel:
1051         if (IsLoad || IsStore)
1052           break; // Avoid crashing on code with undefined behavior
1053         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1054                      llvm::AtomicOrdering::AcquireRelease);
1055         break;
1056       case llvm::AtomicOrderingCABI::seq_cst:
1057         EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1058                      llvm::AtomicOrdering::SequentiallyConsistent);
1059         break;
1060       }
1061     if (RValTy->isVoidType())
1062       return RValue::get(nullptr);
1063 
1064     return convertTempToRValue(
1065         Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1066         RValTy, E->getExprLoc());
1067   }
1068 
1069   // Long case, when Order isn't obviously constant.
1070 
1071   // Create all the relevant BB's
1072   llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1073                    *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1074                    *SeqCstBB = nullptr;
1075   MonotonicBB = createBasicBlock("monotonic", CurFn);
1076   if (!IsStore)
1077     AcquireBB = createBasicBlock("acquire", CurFn);
1078   if (!IsLoad)
1079     ReleaseBB = createBasicBlock("release", CurFn);
1080   if (!IsLoad && !IsStore)
1081     AcqRelBB = createBasicBlock("acqrel", CurFn);
1082   SeqCstBB = createBasicBlock("seqcst", CurFn);
1083   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1084 
1085   // Create the switch for the split
1086   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1087   // doesn't matter unless someone is crazy enough to use something that
1088   // doesn't fold to a constant for the ordering.
1089   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1090   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1091 
1092   // Emit all the different atomics
1093   Builder.SetInsertPoint(MonotonicBB);
1094   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1095                Size, llvm::AtomicOrdering::Monotonic);
1096   Builder.CreateBr(ContBB);
1097   if (!IsStore) {
1098     Builder.SetInsertPoint(AcquireBB);
1099     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1100                  Size, llvm::AtomicOrdering::Acquire);
1101     Builder.CreateBr(ContBB);
1102     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1103                 AcquireBB);
1104     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1105                 AcquireBB);
1106   }
1107   if (!IsLoad) {
1108     Builder.SetInsertPoint(ReleaseBB);
1109     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1110                  Size, llvm::AtomicOrdering::Release);
1111     Builder.CreateBr(ContBB);
1112     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1113                 ReleaseBB);
1114   }
1115   if (!IsLoad && !IsStore) {
1116     Builder.SetInsertPoint(AcqRelBB);
1117     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1118                  Size, llvm::AtomicOrdering::AcquireRelease);
1119     Builder.CreateBr(ContBB);
1120     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1121                 AcqRelBB);
1122   }
1123   Builder.SetInsertPoint(SeqCstBB);
1124   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1125                Size, llvm::AtomicOrdering::SequentiallyConsistent);
1126   Builder.CreateBr(ContBB);
1127   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1128               SeqCstBB);
1129 
1130   // Cleanup and return
1131   Builder.SetInsertPoint(ContBB);
1132   if (RValTy->isVoidType())
1133     return RValue::get(nullptr);
1134 
1135   assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1136   return convertTempToRValue(
1137       Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1138       RValTy, E->getExprLoc());
1139 }
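// End-to-end sketch (illustrative): for
//   _Atomic(int) X;  int R = __c11_atomic_fetch_add(&X, 1, memory_order_relaxed);
// the inline path above emits a single 'atomicrmw add ... monotonic'.  If the
// ordering operand is not a compile-time constant, the operation is instead
// emitted once per allowed ordering behind a switch; if the type is wider than
// the target's maximum inline atomic width (or its size and alignment differ),
// a sized library call such as __atomic_fetch_add_4 is emitted instead.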
1140 
1141 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1142   unsigned addrspace =
1143     cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1144   llvm::IntegerType *ty =
1145     llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1146   return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1147 }
1148 
1149 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1150   llvm::Type *Ty = Addr.getElementType();
1151   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1152   if (SourceSizeInBits != AtomicSizeInBits) {
1153     Address Tmp = CreateTempAlloca();
1154     CGF.Builder.CreateMemCpy(Tmp, Addr,
1155                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1156     Addr = Tmp;
1157   }
1158 
1159   return emitCastToAtomicIntPointer(Addr);
1160 }
1161 
1162 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1163                                              AggValueSlot resultSlot,
1164                                              SourceLocation loc,
1165                                              bool asValue) const {
1166   if (LVal.isSimple()) {
1167     if (EvaluationKind == TEK_Aggregate)
1168       return resultSlot.asRValue();
1169 
1170     // Drill into the padding structure if we have one.
1171     if (hasPadding())
1172       addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1173 
1174     // Otherwise, just convert the temporary to an r-value using the
1175     // normal conversion routine.
1176     return CGF.convertTempToRValue(addr, getValueType(), loc);
1177   }
1178   if (!asValue)
1179     // Get RValue from temp memory as atomic for non-simple lvalues
1180     return RValue::get(CGF.Builder.CreateLoad(addr));
1181   if (LVal.isBitField())
1182     return CGF.EmitLoadOfBitfieldLValue(
1183         LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1184                              LVal.getAlignmentSource()));
1185   if (LVal.isVectorElt())
1186     return CGF.EmitLoadOfLValue(
1187         LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1188                               LVal.getAlignmentSource()), loc);
1189   assert(LVal.isExtVectorElt());
1190   return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1191       addr, LVal.getExtVectorElts(), LVal.getType(),
1192       LVal.getAlignmentSource()));
1193 }
1194 
1195 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1196                                              AggValueSlot ResultSlot,
1197                                              SourceLocation Loc,
1198                                              bool AsValue) const {
1199   // Try to avoid going through a temporary in some easy cases.
1200   assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1201   if (getEvaluationKind() == TEK_Scalar &&
1202       (((!LVal.isBitField() ||
1203          LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1204         !hasPadding()) ||
1205        !AsValue)) {
1206     auto *ValTy = AsValue
1207                       ? CGF.ConvertTypeForMem(ValueTy)
1208                       : getAtomicAddress().getType()->getPointerElementType();
1209     if (ValTy->isIntegerTy()) {
1210       assert(IntVal->getType() == ValTy && "Different integer types.");
1211       return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1212     } else if (ValTy->isPointerTy())
1213       return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1214     else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1215       return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1216   }
1217 
1218   // Create a temporary.  This needs to be big enough to hold the
1219   // atomic integer.
1220   Address Temp = Address::invalid();
1221   bool TempIsVolatile = false;
1222   if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1223     assert(!ResultSlot.isIgnored());
1224     Temp = ResultSlot.getAddress();
1225     TempIsVolatile = ResultSlot.isVolatile();
1226   } else {
1227     Temp = CreateTempAlloca();
1228   }
1229 
1230   // Slam the integer into the temporary.
1231   Address CastTemp = emitCastToAtomicIntPointer(Temp);
1232   CGF.Builder.CreateStore(IntVal, CastTemp)
1233       ->setVolatile(TempIsVolatile);
1234 
1235   return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1236 }
1237 
1238 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1239                                        llvm::AtomicOrdering AO, bool) {
1240   // void __atomic_load(size_t size, void *mem, void *return, int order);
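  // The CallArgList built below maps onto that signature roughly as follows
  // (an illustrative sketch, not the emitted IR):
  //   __atomic_load(getAtomicSizeValue(), getAtomicPointer(), AddForLoaded, AO);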
1241   CallArgList Args;
1242   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1243   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1244            CGF.getContext().VoidPtrTy);
1245   Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1246            CGF.getContext().VoidPtrTy);
1247   Args.add(
1248       RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1249       CGF.getContext().IntTy);
1250   emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1251 }
1252 
1253 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1254                                           bool IsVolatile) {
1255   // Okay, we're doing this natively.
1256   Address Addr = getAtomicAddressAsAtomicIntPointer();
1257   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1258   Load->setAtomic(AO);
1259 
1260   // Other decoration.
1261   if (IsVolatile)
1262     Load->setVolatile(true);
1263   if (LVal.getTBAAInfo())
1264     CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1265   return Load;
1266 }
1267 
1268 /// An LValue is a candidate for having its loads and stores be made atomic if
1269 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1270 /// such an operation can be performed without a libcall.
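///
/// A sketch of the source pattern this models (assuming the /volatile:ms /
/// -fms-volatile mode; the example is illustrative, not taken from a test):
/// \code
///   volatile long Flag;
///   long V = Flag;   // emitted below as an acquire atomic load
///   Flag = V + 1;    // emitted below as a release atomic store
/// \endcode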
1271 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1272   if (!CGM.getCodeGenOpts().MSVolatile) return false;
1273   AtomicInfo AI(*this, LV);
1274   bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1275   // An atomic is inline if we don't need to use a libcall.
1276   bool AtomicIsInline = !AI.shouldUseLibcall();
1277   // MSVC doesn't seem to do this for types wider than a pointer.
1278   if (getContext().getTypeSize(LV.getType()) >
1279       getContext().getTypeSize(getContext().getIntPtrType()))
1280     return false;
1281   return IsVolatile && AtomicIsInline;
1282 }
1283 
1284 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1285                                        AggValueSlot Slot) {
1286   llvm::AtomicOrdering AO;
1287   bool IsVolatile = LV.isVolatileQualified();
1288   if (LV.getType()->isAtomicType()) {
1289     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1290   } else {
1291     AO = llvm::AtomicOrdering::Acquire;
1292     IsVolatile = true;
1293   }
1294   return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1295 }
1296 
1297 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1298                                   bool AsValue, llvm::AtomicOrdering AO,
1299                                   bool IsVolatile) {
1300   // Check whether we should use a library call.
1301   if (shouldUseLibcall()) {
1302     Address TempAddr = Address::invalid();
1303     if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1304       assert(getEvaluationKind() == TEK_Aggregate);
1305       TempAddr = ResultSlot.getAddress();
1306     } else
1307       TempAddr = CreateTempAlloca();
1308 
1309     EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1310 
1311     // Okay, turn that back into the original value or whole atomic (for
1312     // non-simple lvalues) type.
1313     return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1314   }
1315 
1316   // Okay, we're doing this natively.
1317   auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1318 
1319   // If we're ignoring an aggregate return, don't do anything.
1320   if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1321     return RValue::getAggregate(Address::invalid(), false);
1322 
1323   // Okay, turn that back into the original value or atomic (for non-simple
1324   // lvalues) type.
1325   return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1326 }
1327 
1328 /// Emit a load from an l-value of atomic type.  Note that the r-value
1329 /// we produce is an r-value of the atomic *value* type.
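///
/// For example (illustrative):
/// \code
///   _Atomic(int) Obj;
///   int V = Obj;   // the r-value produced here has type 'int',
///                  // not '_Atomic(int)'
/// \endcode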
1330 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1331                                        llvm::AtomicOrdering AO, bool IsVolatile,
1332                                        AggValueSlot resultSlot) {
1333   AtomicInfo Atomics(*this, src);
1334   return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1335                                 IsVolatile);
1336 }
1337 
1338 /// Copy an r-value into memory as part of storing to an atomic type.
1339 /// This needs to create a bit-pattern suitable for atomic operations.
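///
/// For example (illustrative), a three-byte payload is typically widened so
/// the atomic object has a power-of-two size, and the trailing padding byte
/// must hold a known (zero) bit-pattern so that cmpxchg-based updates can
/// compare whole objects:
/// \code
///   struct S { char a, b, c; };
///   _Atomic(struct S) Obj;   // may occupy 4 bytes; byte 3 is padding
/// \endcode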
1340 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1341   assert(LVal.isSimple());
1342   // If we have an r-value, the rvalue should be of the atomic type,
1343   // which means that the caller is responsible for having zeroed
1344   // any padding.  Just do an aggregate copy of that type.
1345   if (rvalue.isAggregate()) {
1346     CGF.EmitAggregateCopy(getAtomicAddress(),
1347                           rvalue.getAggregateAddress(),
1348                           getAtomicType(),
1349                           (rvalue.isVolatileQualified()
1350                            || LVal.isVolatileQualified()));
1351     return;
1352   }
1353 
1354   // Okay, otherwise we're copying stuff.
1355 
1356   // Zero out the buffer if necessary.
1357   emitMemSetZeroIfNecessary();
1358 
1359   // Drill past the padding if present.
1360   LValue TempLVal = projectValue();
1361 
1362   // Okay, store the rvalue in.
1363   if (rvalue.isScalar()) {
1364     CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1365   } else {
1366     CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1367   }
1368 }
1369 
1370 
1371 /// Materialize an r-value into memory for the purposes of storing it
1372 /// to an atomic type.
1373 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1374   // Aggregate r-values are already in memory, and EmitAtomicStore
1375   // requires them to be values of the atomic type.
1376   if (rvalue.isAggregate())
1377     return rvalue.getAggregateAddress();
1378 
1379   // Otherwise, make a temporary and materialize into it.
1380   LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1381   AtomicInfo Atomics(CGF, TempLV);
1382   Atomics.emitCopyIntoMemory(rvalue);
1383   return TempLV.getAddress();
1384 }
1385 
1386 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1387   // If we've got a scalar value of the right size, try to avoid going
1388   // through memory.
1389   if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1390     llvm::Value *Value = RVal.getScalarVal();
1391     if (isa<llvm::IntegerType>(Value->getType()))
1392       return CGF.EmitToMemory(Value, ValueTy);
1393     else {
1394       llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1395           CGF.getLLVMContext(),
1396           LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1397       if (isa<llvm::PointerType>(Value->getType()))
1398         return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1399       else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1400         return CGF.Builder.CreateBitCast(Value, InputIntTy);
1401     }
1402   }
1403   // Otherwise, we need to go through memory.
1404   // Put the r-value in memory.
1405   Address Addr = materializeRValue(RVal);
1406 
1407   // Cast the temporary to the atomic int type and pull a value out.
1408   Addr = emitCastToAtomicIntPointer(Addr);
1409   return CGF.Builder.CreateLoad(Addr);
1410 }
1411 
1412 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1413     llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1414     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1415   // Do the atomic compare-and-exchange.
1416   Address Addr = getAtomicAddressAsAtomicIntPointer();
1417   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1418                                                ExpectedVal, DesiredVal,
1419                                                Success, Failure);
1420   // Other decoration.
1421   Inst->setVolatile(LVal.isVolatileQualified());
1422   Inst->setWeak(IsWeak);
1423 
1424   // Extract the previous value and the success flag from the result pair.
1425   auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1426   auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1427   return std::make_pair(PreviousVal, SuccessFailureVal);
1428 }
1429 
1430 llvm::Value *
1431 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1432                                              llvm::Value *DesiredAddr,
1433                                              llvm::AtomicOrdering Success,
1434                                              llvm::AtomicOrdering Failure) {
1435   // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1436   // void *desired, int success, int failure);
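  // The CallArgList built below maps onto that signature roughly as follows
  // (an illustrative sketch, not the emitted IR):
  //   __atomic_compare_exchange(getAtomicSizeValue(), getAtomicPointer(),
  //                             ExpectedAddr, DesiredAddr, Success, Failure);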
1437   CallArgList Args;
1438   Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1439   Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1440            CGF.getContext().VoidPtrTy);
1441   Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1442            CGF.getContext().VoidPtrTy);
1443   Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1444            CGF.getContext().VoidPtrTy);
1445   Args.add(RValue::get(
1446                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1447            CGF.getContext().IntTy);
1448   Args.add(RValue::get(
1449                llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1450            CGF.getContext().IntTy);
1451   auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1452                                               CGF.getContext().BoolTy, Args);
1453 
1454   return SuccessFailureRVal.getScalarVal();
1455 }
1456 
1457 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1458     RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1459     llvm::AtomicOrdering Failure, bool IsWeak) {
1460   if (isStrongerThan(Failure, Success))
1461     // Don't assert on undefined behavior "failure argument shall be no stronger
1462     // than the success argument".
1463     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1464 
1465   // Check whether we should use a library call.
1466   if (shouldUseLibcall()) {
1467     // Produce a source address.
1468     Address ExpectedAddr = materializeRValue(Expected);
1469     Address DesiredAddr = materializeRValue(Desired);
1470     auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1471                                                  DesiredAddr.getPointer(),
1472                                                  Success, Failure);
1473     return std::make_pair(
1474         convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1475                                   SourceLocation(), /*AsValue=*/false),
1476         Res);
1477   }
1478 
1479   // If we've got a scalar value of the right size, try to avoid going
1480   // through memory.
1481   auto *ExpectedVal = convertRValueToInt(Expected);
1482   auto *DesiredVal = convertRValueToInt(Desired);
1483   auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1484                                          Failure, IsWeak);
1485   return std::make_pair(
1486       ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1487                                 SourceLocation(), /*AsValue=*/false),
1488       Res.second);
1489 }
1490 
1491 static void
1492 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1493                       const llvm::function_ref<RValue(RValue)> &UpdateOp,
1494                       Address DesiredAddr) {
1495   RValue UpRVal;
1496   LValue AtomicLVal = Atomics.getAtomicLValue();
1497   LValue DesiredLVal;
1498   if (AtomicLVal.isSimple()) {
1499     UpRVal = OldRVal;
1500     DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1501   } else {
1502     // Build new lvalue for temp address
1503     Address Ptr = Atomics.materializeRValue(OldRVal);
1504     LValue UpdateLVal;
1505     if (AtomicLVal.isBitField()) {
1506       UpdateLVal =
1507           LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1508                                AtomicLVal.getType(),
1509                                AtomicLVal.getAlignmentSource());
1510       DesiredLVal =
1511           LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1512                                AtomicLVal.getType(),
1513                                AtomicLVal.getAlignmentSource());
1514     } else if (AtomicLVal.isVectorElt()) {
1515       UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1516                                          AtomicLVal.getType(),
1517                                          AtomicLVal.getAlignmentSource());
1518       DesiredLVal = LValue::MakeVectorElt(
1519           DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1520           AtomicLVal.getAlignmentSource());
1521     } else {
1522       assert(AtomicLVal.isExtVectorElt());
1523       UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1524                                             AtomicLVal.getType(),
1525                                             AtomicLVal.getAlignmentSource());
1526       DesiredLVal = LValue::MakeExtVectorElt(
1527           DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1528           AtomicLVal.getAlignmentSource());
1529     }
1530     UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1531     DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1532     UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1533   }
1534   // Store new value in the corresponding memory area
1535   RValue NewRVal = UpdateOp(UpRVal);
1536   if (NewRVal.isScalar()) {
1537     CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1538   } else {
1539     assert(NewRVal.isComplex());
1540     CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1541                            /*isInit=*/false);
1542   }
1543 }
1544 
1545 void AtomicInfo::EmitAtomicUpdateLibcall(
1546     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1547     bool IsVolatile) {
1548   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1549 
1550   Address ExpectedAddr = CreateTempAlloca();
1551 
1552   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1553   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1554   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1555   CGF.EmitBlock(ContBB);
1556   Address DesiredAddr = CreateTempAlloca();
1557   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1558       requiresMemSetZero(getAtomicAddress().getElementType())) {
1559     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1560     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1561   }
1562   auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1563                                            AggValueSlot::ignored(),
1564                                            SourceLocation(), /*AsValue=*/false);
1565   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1566   auto *Res =
1567       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1568                                        DesiredAddr.getPointer(),
1569                                        AO, Failure);
1570   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1571   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1572 }
1573 
1574 void AtomicInfo::EmitAtomicUpdateOp(
1575     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1576     bool IsVolatile) {
1577   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1578 
1579   // Do the atomic load.
1580   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1581   // For non-simple lvalues perform compare-and-swap procedure.
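  // The emitted control flow has roughly this shape (a sketch, not verbatim IR):
  //
  //   entry:        %old = <atomic load>
  //                 br label %atomic_cont
  //   atomic_cont:  %phi = phi [ %old, %entry ], [ %prev, %atomic_cont ]
  //                 ... apply UpdateOp to %phi, store the result to a temp ...
  //                 %pair = cmpxchg <addr>, %phi, <desired>
  //                 %prev = extractvalue %pair, 0
  //                 %ok   = extractvalue %pair, 1
  //                 br i1 %ok, label %atomic_exit, label %atomic_cont
  //   atomic_exit: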
1582   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1583   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1584   auto *CurBB = CGF.Builder.GetInsertBlock();
1585   CGF.EmitBlock(ContBB);
1586   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1587                                              /*NumReservedValues=*/2);
1588   PHI->addIncoming(OldVal, CurBB);
1589   Address NewAtomicAddr = CreateTempAlloca();
1590   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1591   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1592       requiresMemSetZero(getAtomicAddress().getElementType())) {
1593     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1594   }
1595   auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1596                                            SourceLocation(), /*AsValue=*/false);
1597   EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1598   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1599   // Try to write the new value using a cmpxchg operation.
1600   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1601   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1602   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1603   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1604 }
1605 
1606 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1607                                   RValue UpdateRVal, Address DesiredAddr) {
1608   LValue AtomicLVal = Atomics.getAtomicLValue();
1609   LValue DesiredLVal;
1610   // Build new lvalue for temp address
1611   if (AtomicLVal.isBitField()) {
1612     DesiredLVal =
1613         LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1614                              AtomicLVal.getType(),
1615                              AtomicLVal.getAlignmentSource());
1616   } else if (AtomicLVal.isVectorElt()) {
1617     DesiredLVal =
1618         LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1619                               AtomicLVal.getType(),
1620                               AtomicLVal.getAlignmentSource());
1621   } else {
1622     assert(AtomicLVal.isExtVectorElt());
1623     DesiredLVal = LValue::MakeExtVectorElt(
1624         DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1625         AtomicLVal.getAlignmentSource());
1626   }
1627   DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1628   // Store new value in the corresponding memory area
1629   assert(UpdateRVal.isScalar());
1630   CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1631 }
1632 
1633 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1634                                          RValue UpdateRVal, bool IsVolatile) {
1635   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1636 
1637   Address ExpectedAddr = CreateTempAlloca();
1638 
1639   EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1640   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1641   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1642   CGF.EmitBlock(ContBB);
1643   Address DesiredAddr = CreateTempAlloca();
1644   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1645       requiresMemSetZero(getAtomicAddress().getElementType())) {
1646     auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1647     CGF.Builder.CreateStore(OldVal, DesiredAddr);
1648   }
1649   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1650   auto *Res =
1651       EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1652                                        DesiredAddr.getPointer(),
1653                                        AO, Failure);
1654   CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1655   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1656 }
1657 
1658 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1659                                     bool IsVolatile) {
1660   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1661 
1662   // Do the atomic load.
1663   auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1664   // For non-simple lvalues perform compare-and-swap procedure.
1665   auto *ContBB = CGF.createBasicBlock("atomic_cont");
1666   auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1667   auto *CurBB = CGF.Builder.GetInsertBlock();
1668   CGF.EmitBlock(ContBB);
1669   llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1670                                              /*NumReservedValues=*/2);
1671   PHI->addIncoming(OldVal, CurBB);
1672   Address NewAtomicAddr = CreateTempAlloca();
1673   Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1674   if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1675       requiresMemSetZero(getAtomicAddress().getElementType())) {
1676     CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1677   }
1678   EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1679   auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1680   // Try to write the new value using a cmpxchg operation.
1681   auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1682   PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1683   CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1684   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1685 }
1686 
1687 void AtomicInfo::EmitAtomicUpdate(
1688     llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1689     bool IsVolatile) {
1690   if (shouldUseLibcall()) {
1691     EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1692   } else {
1693     EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1694   }
1695 }
1696 
1697 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1698                                   bool IsVolatile) {
1699   if (shouldUseLibcall()) {
1700     EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1701   } else {
1702     EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1703   }
1704 }
1705 
1706 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1707                                       bool isInit) {
1708   bool IsVolatile = lvalue.isVolatileQualified();
1709   llvm::AtomicOrdering AO;
1710   if (lvalue.getType()->isAtomicType()) {
1711     AO = llvm::AtomicOrdering::SequentiallyConsistent;
1712   } else {
1713     AO = llvm::AtomicOrdering::Release;
1714     IsVolatile = true;
1715   }
1716   return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1717 }
1718 
1719 /// Emit a store to an l-value of atomic type.
1720 ///
1721 /// Note that the r-value is expected to be an r-value *of the atomic
1722 /// type*; this means that for aggregate r-values, it should include
1723 /// storage for any padding that was necessary.
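///
/// For example (illustrative):
/// \code
///   struct S { char a, b, c; };
///   _Atomic(struct S) Obj;
///   // 'rvalue' here is expected to be a value of '_Atomic(struct S)',
///   // padding included, not a bare 'struct S'.
/// \endcode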
1724 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1725                                       llvm::AtomicOrdering AO, bool IsVolatile,
1726                                       bool isInit) {
1727   // If this is an aggregate r-value, it should agree in type except
1728   // maybe for address-space qualification.
1729   assert(!rvalue.isAggregate() ||
1730          rvalue.getAggregateAddress().getElementType()
1731            == dest.getAddress().getElementType());
1732 
1733   AtomicInfo atomics(*this, dest);
1734   LValue LVal = atomics.getAtomicLValue();
1735 
1736   // If this is an initialization, just put the value there normally.
1737   if (LVal.isSimple()) {
1738     if (isInit) {
1739       atomics.emitCopyIntoMemory(rvalue);
1740       return;
1741     }
1742 
1743     // Check whether we should use a library call.
1744     if (atomics.shouldUseLibcall()) {
1745       // Produce a source address.
1746       Address srcAddr = atomics.materializeRValue(rvalue);
1747 
1748       // void __atomic_store(size_t size, void *mem, void *val, int order)
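      // The args below map onto that signature roughly as follows (an
      // illustrative sketch, not the emitted IR):
      //   __atomic_store(atomics.getAtomicSizeValue(),
      //                  atomics.getAtomicPointer(), srcAddr, AO);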
1749       CallArgList args;
1750       args.add(RValue::get(atomics.getAtomicSizeValue()),
1751                getContext().getSizeType());
1752       args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1753                getContext().VoidPtrTy);
1754       args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1755                getContext().VoidPtrTy);
1756       args.add(
1757           RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1758           getContext().IntTy);
1759       emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1760       return;
1761     }
1762 
1763     // Okay, we're doing this natively.
1764     llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1765 
1766     // Do the atomic store.
1767     Address addr =
1768         atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1769     intValue = Builder.CreateIntCast(
1770         intValue, addr.getElementType(), /*isSigned=*/false);
1771     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1772 
1773     // Initializations don't need to be atomic.
1774     if (!isInit)
1775       store->setAtomic(AO);
1776 
1777     // Other decoration.
1778     if (IsVolatile)
1779       store->setVolatile(true);
1780     if (dest.getTBAAInfo())
1781       CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1782     return;
1783   }
1784 
1785   // Emit simple atomic update operation.
1786   atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1787 }
1788 
1789 /// Emit a compare-and-exchange op for atomic type.
1790 ///
1791 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1792     LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1793     llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1794     AggValueSlot Slot) {
1795   // If this is an aggregate r-value, it should agree in type except
1796   // maybe for address-space qualification.
1797   assert(!Expected.isAggregate() ||
1798          Expected.getAggregateAddress().getElementType() ==
1799              Obj.getAddress().getElementType());
1800   assert(!Desired.isAggregate() ||
1801          Desired.getAggregateAddress().getElementType() ==
1802              Obj.getAddress().getElementType());
1803   AtomicInfo Atomics(*this, Obj);
1804 
1805   return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1806                                            IsWeak);
1807 }
1808 
1809 void CodeGenFunction::EmitAtomicUpdate(
1810     LValue LVal, llvm::AtomicOrdering AO,
1811     const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
1812   AtomicInfo Atomics(*this, LVal);
1813   Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1814 }
1815 
1816 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1817   AtomicInfo atomics(*this, dest);
1818 
1819   switch (atomics.getEvaluationKind()) {
1820   case TEK_Scalar: {
1821     llvm::Value *value = EmitScalarExpr(init);
1822     atomics.emitCopyIntoMemory(RValue::get(value));
1823     return;
1824   }
1825 
1826   case TEK_Complex: {
1827     ComplexPairTy value = EmitComplexExpr(init);
1828     atomics.emitCopyIntoMemory(RValue::getComplex(value));
1829     return;
1830   }
1831 
1832   case TEK_Aggregate: {
1833     // Fix up the destination if the initializer isn't an expression
1834     // of atomic type.
1835     bool Zeroed = false;
1836     if (!init->getType()->isAtomicType()) {
1837       Zeroed = atomics.emitMemSetZeroIfNecessary();
1838       dest = atomics.projectValue();
1839     }
1840 
1841     // Evaluate the expression directly into the destination.
1842     AggValueSlot slot = AggValueSlot::forLValue(dest,
1843                                         AggValueSlot::IsNotDestructed,
1844                                         AggValueSlot::DoesNotNeedGCBarriers,
1845                                         AggValueSlot::IsNotAliased,
1846                                         Zeroed ? AggValueSlot::IsZeroed :
1847                                                  AggValueSlot::IsNotZeroed);
1848 
1849     EmitAggExpr(init, slot);
1850     return;
1851   }
1852   }
1853   llvm_unreachable("bad evaluation kind");
1854 }
1855