// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
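//
// A minimal usage sketch (a sketch only; client code should include
// base/atomicops.h, which exposes the portable wrappers defined below):
//
//   v8::base::Atomic32 flag = 0;
//   v8::base::Release_Store(&flag, 1);                      // publish
//   v8::base::Atomic32 seen = v8::base::Acquire_Load(&flag);  // observe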

#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace base {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

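// The declarations below mirror the atomic interface exported by the
// ThreadSanitizer runtime (tsan_interface_atomic.h). Routing every atomic
// access through these entry points lets the race detector observe both the
// operation and its memory ordering.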
extern "C" {
typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

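// Memory orderings; the enumerators correspond one-to-one to the C++11
// std::memory_order values.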
typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

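// Atomic loads, one entry point per operand width.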
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

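// Atomic stores.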
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

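// Atomic exchange: stores v and returns the previous value of *a.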
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

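// fetch_add returns the value of *a before the addition.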
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

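// Bitwise read-modify-write operations (and/or/xor/nand); each returns the
// previous value. fetch_nand stores ~(*a & v), the GCC __atomic_fetch_nand
// semantics.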
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

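// Weak compare-and-swap: returns nonzero on success and may fail spuriously.
// On failure the value observed at *a is written back to *c.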
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

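// Strong compare-and-swap: fails only if *a does not equal *c.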
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

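// Value-returning compare-and-swap: returns the value observed at *a, so a
// return value equal to c indicates success.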
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

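// Fences, analogous to std::atomic_thread_fence and std::atomic_signal_fence.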
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
}  // extern "C"

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

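// The wrappers below implement the portable atomicops API on top of the
// TSan interface so the race detector can see every atomic access.
//
// The CompareAndSwap wrappers return the value observed at *ptr: old_value
// when the swap succeeded, the current value otherwise.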
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

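// The increment wrappers return the updated value, i.e. the previous value
// plus increment.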
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

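// Acquire_Store performs a relaxed store followed by a full (seq_cst)
// fence; Release_Store uses a release store directly.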
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

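// Release_Load pairs a full fence with a relaxed load, mirroring
// Acquire_Store above.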
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

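// The 64-bit wrappers below mirror the 32-bit versions, using the
// __tsan_atomic64_* entry points.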
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

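// Full sequentially consistent memory barrier.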
inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_