// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (TSan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value, whereas
//    C11 fetch add returns the previous value. The implementation therefore
//    adds the increment a second time to the fetched value (which the compiler
//    should be able to detect and optimize).
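//
// As a rough usage sketch (illustrative only, not part of this header), the
// acquire/release operations below are typically paired to publish plain data
// between threads; the names g_ready and g_payload are hypothetical:
//
//   Atomic32 g_ready = 0;
//   int g_payload = 0;
//
//   // Producer thread:
//   g_payload = 42;
//   base::subtle::Release_Store(&g_ready, 1);
//
//   // Consumer thread:
//   if (base::subtle::Acquire_Load(&g_ready) == 1) {
//     int value = g_payload;  // Guaranteed to observe the write of 42.
//   }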

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

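// Full (sequentially consistent) memory fence.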
inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

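// compare_exchange_strong() updates |old_value| in place with the value
// actually observed, so returning it yields the previous contents of |ptr|,
// as the atomicops.h CompareAndSwap contract expects.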
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

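// The increment functions return the post-increment value, while fetch_add()
// returns the pre-increment value, hence |increment| is added back to the
// result.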
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

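// On failure the release CAS falls back to relaxed ordering: a failed exchange
// performs no store for a release ordering to apply to (see the header
// comment).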
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

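// No C++11 equivalent exists for an acquire store; it is approximated with a
// relaxed store followed by a full fence (see the header comment).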
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

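// Likewise, a release load is approximated with a full fence followed by a
// relaxed load.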
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(ARCH_CPU_64_BITS)

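// 64-bit variants of the operations above; the memory orderings mirror the
// 32-bit versions exactly.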
typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // defined(ARCH_CPU_64_BITS)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_