// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If you
// do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The Relaxed versions
// are provided when no fences are needed:
//   Relaxed_Store()
//   Relaxed_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//
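// As a minimal usage sketch (the variable name below is illustrative, not
// part of this header):
//
//   v8::base::Atomic32 g_flag = 0;
//
//   // Writer thread: publish a value with no ordering guarantees.
//   v8::base::Relaxed_Store(&g_flag, 1);
//
//   // Reader thread: observe the value with no ordering guarantees.
//   if (v8::base::Relaxed_Load(&g_flag) == 1) { /* ... */ }
//
// If the store must also publish earlier writes (e.g. the fields of a newly
// initialized object), pair Release_Store() on the writer side with
// Acquire_Load() on the reader side instead of the Relaxed variants.
//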

#ifndef V8_BASE_ATOMICOPS_H_
#define V8_BASE_ATOMICOPS_H_

#include <stdint.h>

#include <atomic>

// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
// - libstdc++: captures bits/c++config.h for __GLIBCXX__
#include <cstddef>

#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/macros.h"

#if defined(V8_OS_STARBOARD)
#include "starboard/atomic.h"
#endif  // V8_OS_STARBOARD

namespace v8 {
namespace base {

#ifdef V8_OS_STARBOARD
using Atomic8 = SbAtomic8;
using Atomic16 = int16_t;
using Atomic32 = SbAtomic32;
#if SB_IS_64_BIT
using Atomic64 = SbAtomic64;
#endif
#else
using Atomic8 = char;
using Atomic16 = int16_t;
using Atomic32 = int32_t;
#if defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
using Atomic64 = int64_t;
#else
using Atomic64 = intptr_t;
#endif  // defined(__ILP32__)
#endif  // defined(V8_HOST_ARCH_64_BIT)
#endif  // V8_OS_STARBOARD

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(V8_OS_STARBOARD)
using AtomicWord = SbAtomicPtr;
#else
using AtomicWord = intptr_t;
#endif
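
// As a sanity-check sketch (an illustrative addition, not something this
// header asserts): AtomicWord is meant to be a pointer-sized integer, so a
// static_assert such as the following would hold on supported configurations:
//
//   static_assert(sizeof(AtomicWord) == sizeof(void*),
//                 "AtomicWord must be pointer-sized");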

namespace helper {
template <typename T>
volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
  return reinterpret_cast<volatile std::atomic<T>*>(ptr);
}
template <typename T>
volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
  return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
}
}  // namespace helper

inline void SeqCst_MemoryFence() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Atomically execute:
//   result = *ptr;
//   if (result == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
// Always return the value of |*ptr| before the operation.
// Acquire, Relaxed, Release correspond to standard C++ memory orders.
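//
// As a minimal usage sketch (hypothetical variable and values, not part of
// this header), a relaxed test-and-set of a flag could look like:
//
//   v8::base::Atomic32 g_claimed = 0;
//
//   // Returns true for exactly one caller: the CAS writes 1 only if the
//   // flag was still 0, and always returns the previous value.
//   bool TryClaim() {
//     return v8::base::Relaxed_CompareAndSwap(&g_claimed, 0, 1) == 0;
//   }
//
// Use the Acquire/Release/AcquireRelease variants when the caller also needs
// the corresponding ordering of surrounding memory accesses.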
inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
                                       Atomic16 old_value, Atomic16 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic32 SeqCst_AtomicExchange(volatile Atomic32* ptr,
                                      Atomic32 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

// Atomically increments |*ptr| by |increment| and returns the new
// (incremented) value.
inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  USE(result);  // Make gcc compiler happy.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic8 SeqCst_Load(volatile const Atomic8* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

inline Atomic32 SeqCst_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_relaxed);
}

inline Atomic64 SeqCst_AtomicExchange(volatile Atomic64* ptr,
                                      Atomic64 new_value) {
  return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
                                       std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acquire, std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_release, std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  std::atomic_compare_exchange_strong_explicit(
      helper::to_std_atomic(ptr), &old_value, new_value,
      std::memory_order_acq_rel, std::memory_order_acquire);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_release);
}

inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
                             std::memory_order_seq_cst);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_acquire);
}

inline Atomic64 SeqCst_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
                                   std::memory_order_seq_cst);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

// Copies |bytes| bytes from |src| to |dst| using relaxed atomic accesses:
// byte-by-byte at the unaligned edges and word-at-a-time in the aligned
// middle.
inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
                           size_t bytes) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
      dst += kAtomicWordSize;
      src += kAtomicWordSize;
      bytes -= kAtomicWordSize;
    }
  }
  while (bytes > 0) {
    Relaxed_Store(dst++, Relaxed_Load(src++));
    --bytes;
  }
}

inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src,
                            size_t bytes) {
  // Use Relaxed_Memcpy if copying forwards is safe. This is the case if there
  // is no overlap, or {dst} lies before {src}. The single unsigned comparison
  // below covers both cases:
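  //
  // (A worked example with hypothetical addresses: for dst = 0x1008,
  // src = 0x1000 and bytes = 16 the difference is 8 < 16, so the regions
  // overlap with {dst} after {src} and we must copy backwards; for
  // dst = 0x1000 and src = 0x1008 the unsigned difference wraps around to a
  // huge value, so the forward copy is taken.)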
  if (reinterpret_cast<uintptr_t>(dst) - reinterpret_cast<uintptr_t>(src) >=
      bytes) {
    Relaxed_Memcpy(dst, src, bytes);
    return;
  }

  // Otherwise copy backwards.
  dst += bytes;
  src += bytes;
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (bytes > 0 &&
         !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
  if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
    while (bytes >= kAtomicWordSize) {
      dst -= kAtomicWordSize;
      src -= kAtomicWordSize;
      bytes -= kAtomicWordSize;
      Relaxed_Store(
          reinterpret_cast<volatile AtomicWord*>(dst),
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
    }
  }
  while (bytes > 0) {
    Relaxed_Store(--dst, Relaxed_Load(--src));
    --bytes;
  }
}

namespace helper {
inline int MemcmpNotEqualFundamental(Atomic8 u1, Atomic8 u2) {
  DCHECK_NE(u1, u2);
  return u1 < u2 ? -1 : 1;
}
inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) {
  DCHECK_NE(u1, u2);
#if defined(V8_TARGET_BIG_ENDIAN)
  return u1 < u2 ? -1 : 1;
#else
  // On little-endian targets the bytes must be compared in memory order,
  // i.e. starting from the least significant byte of the loaded words.
  for (size_t i = 0; i < sizeof(AtomicWord); ++i) {
    uint8_t byte1 = u1 & 0xFF;
    uint8_t byte2 = u2 & 0xFF;
    if (byte1 != byte2) return byte1 < byte2 ? -1 : 1;
    u1 >>= 8;
    u2 >>= 8;
  }
  UNREACHABLE();
#endif
}
}  // namespace helper

// memcmp()-style three-way comparison of |len| bytes at |s1| and |s2|,
// performed with relaxed atomic loads; returns -1, 0, or 1.
inline int Relaxed_Memcmp(volatile const Atomic8* s1,
                          volatile const Atomic8* s2, size_t len) {
  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
  while (len > 0 &&
         !(IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
           IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize))) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  if (IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
      IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize)) {
    while (len >= kAtomicWordSize) {
      AtomicWord u1 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s1));
      AtomicWord u2 =
          Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(s2));
      if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
      s1 += kAtomicWordSize;
      s2 += kAtomicWordSize;
      len -= kAtomicWordSize;
    }
  }

  while (len > 0) {
    Atomic8 u1 = Relaxed_Load(s1++);
    Atomic8 u2 = Relaxed_Load(s2++);
    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
    --len;
  }

  return 0;
}

}  // namespace base
}  // namespace v8

// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(V8_OS_DARWIN) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif

#endif  // V8_BASE_ATOMICOPS_H_