// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker failure
//    ordering is only valid under certain uses of compare exchange.
//  * An acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence (see the illustrative sketch after Relaxed_Load below).
//  * A release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load (also illustrated below).
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able
//    to detect and optimize); see the worked example at
//    Relaxed_AtomicIncrement.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"
#include "src/base/macros.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h.

inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  USE(result);  // Silence gcc's unused-variable warning.
  return old_value;
}

inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
                                       Atomic16 old_value, Atomic16 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}
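
// Worked example for the double increment above (editorial, illustrative
// only): if *ptr holds 41 and increment is 1, __atomic_fetch_add stores 42
// and returns the previous value 41; adding `increment` back yields the
// post-incremented value 42 that Relaxed_AtomicIncrement is expected to
// return, per the notes at the top of this file.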

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                                            __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  USE(result);  // Silence gcc's unused-variable warning.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return old_value;
}
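
// Note on the failure orderings above (editorial, hedged): a failed compare
// exchange performs no store, so a release ordering would have nothing to
// publish; C++11 disallows release orderings on the failure path, which is
// why AcquireRelease_CompareAndSwap fails as acquire and
// Release_CompareAndSwap fails as relaxed.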

inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}
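
// Illustrative sketch only (hypothetical names, not part of this header's
// API): the notes at the top of the file describe how an acquire store and a
// release load would be emulated, since neither exists in the C11 memory
// model.
inline void AcquireStoreSketch(volatile Atomic32* ptr, Atomic32 value) {
  Relaxed_Store(ptr, value);  // A relaxed store...
  SeqCst_MemoryFence();       // ...followed by a sequentially consistent fence.
}

inline Atomic32 ReleaseLoadSketch(volatile const Atomic32* ptr) {
  SeqCst_MemoryFence();      // A sequentially consistent fence...
  return Relaxed_Load(ptr);  // ...followed by a relaxed load.
}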

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}
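
// Usage sketch (hypothetical, not part of the original file): a minimal
// test-and-set spinlock built from the primitives above. The acquire
// compare-and-swap that takes the lock pairs with the release store that
// frees it, so writes made inside the critical section are visible to the
// next owner.
class SpinLockSketch {
 public:
  void Lock() {
    // Try to swap 0 -> 1; a nonzero return value means the lock was already
    // held, since Acquire_CompareAndSwap returns the value it observed.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Wait on a relaxed load so contended spinning avoids repeated CAS
      // traffic; the acquire CAS on the next iteration provides the ordering.
      while (Relaxed_Load(&state_) != 0) {
      }
    }
  }

  void Unlock() { Release_Store(&state_, 0); }

 private:
  volatile Atomic32 state_ = 0;
};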

#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
                                              Atomic64 old_value,
                                              Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_