//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

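// RAII helper for the atomic interceptors below: the ScopedInRtl member marks
// the current thread as running inside the runtime, and the constructor and
// destructor verify the nesting depth and log the intercepted call in debug
// builds.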
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const int mo_relaxed = __tsan_memory_order_relaxed;
const int mo_consume = __tsan_memory_order_consume;
const int mo_acquire = __tsan_memory_order_acquire;
const int mo_release = __tsan_memory_order_release;
const int mo_acq_rel = __tsan_memory_order_acq_rel;
const int mo_seq_cst = __tsan_memory_order_seq_cst;

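// Bumps the statistics counters for an atomic operation: the total count,
// the operation kind, the access size and the memory order.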
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

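// Common prologue for the __tsan_atomic* entry points below: optionally
// upgrades the memory order to seq_cst (flag force_seq_cst_atomics), grabs
// the caller's thread state and return address, updates the stats and
// dispatches to the matching Atomic* template.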
#define SCOPED_ATOMIC(func, ...) \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

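// Plain load, plus an acquire of the synchronization data associated with
// the address for any memory order stronger than relaxed.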
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(mo & (mo_relaxed | mo_consume | mo_acquire | mo_seq_cst));
  T v = *a;
  if (mo & (mo_consume | mo_acquire | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(mo & (mo_relaxed | mo_release | mo_seq_cst));
  if (mo & (mo_release | mo_seq_cst))
    ReleaseStore(thr, pc, (uptr)a);
  *a = v;
}

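// The read-modify-write operations below map to the __sync builtins;
// release-class orders publish the address before the operation and
// acquire-class orders synchronize with it afterwards.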
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_lock_test_and_set(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_add(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_and(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_or(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  v = __sync_fetch_and_xor(a, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  return v;
}

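// Compare-and-swap. On failure the observed value is written back to *c,
// mirroring the C/C++11 compare_exchange interface; returns whether the
// exchange took place.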
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo) {
  if (mo & (mo_release | mo_acq_rel | mo_seq_cst))
    Release(thr, pc, (uptr)a);
  T cc = *c;
  T pr = __sync_val_compare_and_swap(a, cc, v);
  if (mo & (mo_consume | mo_acquire | mo_acq_rel | mo_seq_cst))
    Acquire(thr, pc, (uptr)a);
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

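// The memory order is currently ignored: a full hardware barrier is always
// issued.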
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  __sync_synchronize();
}

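// Interface entry points, one per access size. The compiler instrumentation
// replaces atomic operations in the instrumented program with calls to these
// functions; for example (illustrative only), an acquire load of a 32-bit
// atomic becomes roughly:
//   a32 v = __tsan_atomic32_load(addr, __tsan_memory_order_acquire);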
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Unused; present only so that sizeof(*a) in SCOPED_ATOMIC compiles.
  SCOPED_ATOMIC(Fence, mo);
}