//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

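// Registers the sync object with the runtime deadlock detector; the mutex
// is identified inside the detector by its GetId() value.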
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

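// Produces a mutex misuse report of the given type (double lock, bad
// unlock, etc.). Callers in this file invoke it only after releasing the
// SyncVar mutex, since reporting takes the thread registry lock.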
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

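// Called on mutex construction (typically from an interceptor such as
// pthread_mutex_init). The 1-byte write imitated below lets the detector
// catch races between mutex initialization and other accesses to the same
// memory; it is skipped for linker-initialized mutexes.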
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

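// Called on mutex destruction. If the mutex is still locked and
// report_destroy_locked is set, a ReportTypeMutexDestroyLocked report is
// produced before the SyncVar is reset. Destroy is a no-op for
// linker-initialized mutexes.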
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if (s->is_linker_init) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  // last_lock holds a raw 64-bit FastState (tid in the high bits); it must
  // not be truncated to 32 bits, otherwise RestoreStack below gets a bogus
  // tid/epoch.
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

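// Write-lock acquisition. rec is the recursion count to add (must be
// positive). If the mutex is already owned by a different thread, a
// double-lock report is raised once (is_broken suppresses repeats). On
// first acquisition the thread acquires both the write and read clocks.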
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

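// Write-unlock. Returns the number of recursion levels released: all of
// them when 'all' is true, otherwise one. An unlock by a thread that does
// not own the mutex is reported as a bad unlock in C++ mode.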
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

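// Read-lock acquisition. Taking a read lock while the write lock is held
// is flagged as a bad read lock. Note the SyncVar itself is locked in read
// mode here, so concurrent readers do not serialize on it.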
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

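// Read-unlock: releases into the mutex read clock. A read unlock while the
// mutex is write-held is reported as a bad read unlock.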
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

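// Unlock for callers that do not know whether they hold the lock in read
// or write mode (e.g. an interceptor for pthread_rwlock_unlock, which is
// used for both). The mode is inferred from owner_tid: the current thread
// as owner means write unlock, no owner means read unlock, and any other
// owner is a bad unlock.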
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

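// Forcibly resets ownership and recursion without any clock operations,
// e.g. when a robust mutex is recovered after its owner died and the
// SyncVar must be brought back to a consistent state.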
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

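// Unconditionally reports ReportTypeMutexInvalidAccess for the mutex at
// addr, e.g. when an interceptor observes an operation on an invalid
// (already destroyed) mutex.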
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

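// Plain acquire on an arbitrary address. Only an existing SyncVar is
// consulted: if none exists, no release has happened on this address, so
// there is nothing to acquire.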
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

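// AcquireGlobal synchronizes the current thread with all other threads:
// for each thread the callback copies its current epoch (or epoch1, its
// final epoch, if the thread already finished) into this thread's clock.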
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

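// Release and ReleaseStore on an arbitrary address. The difference mirrors
// the clock operations below: release joins the thread clock into the sync
// clock, while release-store overwrites it.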
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

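// The sleep clock machinery below supports the "as if synchronized via
// sleep" heuristic in race reports; it is not needed by the Go runtime.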
#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

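// The *Impl helpers below operate directly on a SyncClock and assume the
// caller already holds the lock protecting it (the SyncVar mutex in the
// functions above).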
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

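// Converts a DDReport from the deadlock detector into a TSan report: one
// mutex and one thread per edge of the lock cycle, plus the acquisition
// stacks (two per edge when second_deadlock_stack is set).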
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan