//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

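// Adapts the current thread's state to the DDCallback interface expected by
// the common deadlock detector: per-thread detector state, stack unwinding,
// and a unique thread id.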
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  virtual u32 Unwind() {
    return CurrentStackId(thr, pc);
  }
  virtual int UniqueTid() {
    return thr->unique_id;
  }
};

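// Registers a freshly created sync object with the deadlock detector and
// tags its detector state with the mutex id.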
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

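// Emits a mutex-misuse report (double lock, bad unlock, etc.) of the given
// type for the mutex identified by mid, with the current stack attached.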
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  rep.AddStack(&trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

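// Creates (or re-initializes) the sync object for the mutex at addr. Unless
// the mutex is linker-initialized, the call itself is modeled as a write to
// the mutex memory so that racy initialization is detected.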
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

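// Handles mutex destruction: reports destruction of a locked mutex (if
// report_destroy_locked is set) and resets the associated sync object.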
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause lots of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  if (s == 0)
    return;
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset();  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
    if (s != 0) {
      s->Reset();
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

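// Models a write-lock acquisition. rec is the number of recursion levels to
// acquire at once (e.g. when an interceptor restores recursion after a
// condition-variable wait); detects double locks and, if deadlock detection
// is enabled, feeds the acquisition into the deadlock detector.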
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

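// Models a write-unlock. If all is set, releases every recursion level at
// once; returns the number of levels released so the caller can restore them
// later (e.g. around a condition-variable wait).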
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (flags()->detect_deadlocks && s->recursion == 0 && !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

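// Models a read-lock acquisition; reports a bad read lock if the mutex is
// currently write-held.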
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

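// Models a read-unlock; releases into the sync object's read clock so that
// a later write-lock acquires from all past readers.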
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

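// Handles an unlock when the caller does not know whether the mutex was held
// in read or write mode (e.g. pthread_rwlock_unlock); the mode is inferred
// from the recorded owner.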
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

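// Forcibly resets ownership and recursion of the mutex, presumably after the
// runtime learns the mutex is in an inconsistent state (e.g. robust-mutex
// EOWNERDEAD recovery), so later operations do not produce false reports.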
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

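// Models a bare acquire on an arbitrary address (happens-before edge into
// this thread), without mutex ownership bookkeeping.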
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

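// Sets the calling thread's clock entry for each thread to that thread's
// current epoch; used by AcquireGlobal below to establish happens-before
// with all currently running threads.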
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

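// Models a bare release on an arbitrary address: merges this thread's clock
// into the sync object's clock.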
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

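// Like Release, but with store semantics: the sync object's clock is
// overwritten with this thread's clock instead of being merged with it.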
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#ifndef TSAN_GO
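// Snapshots all threads' clocks into last_sleep_clock at the end of a sleep;
// race reports later use this to mark accesses that would be ordered if
// sleep counted as synchronization ("as if synchronized via sleep").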
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

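// The *Impl helpers below operate directly on a SyncClock; the caller is
// expected to hold the owning sync object's mutex and to have written any
// required trace events.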
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

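// Converts a cycle found by the deadlock detector into a TSan report:
// one mutex and one thread per loop edge, plus the stacks recorded at the
// lock sites (two per edge if second_deadlock_stack is set).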
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    uptr size;
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk) {
        const uptr *trace = StackDepotGet(stk, &size);
        stacks[i].Init(const_cast<uptr *>(trace), size);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        stacks[i].Init(&dummy_pc, 1);
      }
      rep.AddStack(&stacks[i], true);
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan