//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

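// Adapts the current thread to the generic deadlock detector interface
// (DDCallback): exposes the per-physical-thread (pt) and per-logical-thread
// (lt) detector state and lets the detector capture stacks and thread ids.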
struct Callback final : public DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

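// Registers a freshly created sync object with the deadlock detector and
// records the mutex id so that deadlock reports can refer back to it.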
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

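// Common reporting path for mutex API misuse
// (double lock, bad unlock, invalid access, etc.).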
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

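// Imitates a 1-byte write to the mutex memory so that races between mutex
// construction and other accesses to the same memory are detected (skipped
// for linker-initialized mutexes), then creates the sync object.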
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

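// Called before a blocking write lock acquisition. For non-try locks the
// deadlock detector is queried before the thread blocks, so a lock-order
// cycle is reported even if the thread would deadlock for real; try-locks
// cannot deadlock and are handled in MutexPostLock instead.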
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

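// Handles a write unlock. Returns the recursion count that this call
// released, so a caller that fully unlocks a recursive mutex can later
// restore the same recursion level via MutexPostLock's rec parameter
// (with MutexFlagRecursiveLock).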
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

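// Read-lock counterpart of MutexPreLock: queries the deadlock detector
// before a blocking read (shared) lock acquisition.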
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

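// Unlock when the caller does not say whether it holds the mutex for reading
// or writing (e.g. annotation-based APIs): the kind of unlock is inferred
// from the recorded owner thread.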
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

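// Forcibly resets ownership and recursion, bringing the sync object back to
// a consistent unlocked state (used e.g. when a robust mutex is recovered
// after its owner died).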
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

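// Reports an operation on an address that is not a currently valid mutex.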
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

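// Plain happens-before acquire on an arbitrary address: if a sync object
// already exists for addr, join its clock into the current thread's clock.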
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

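// AcquireGlobal synchronizes the current thread with everything all other
// threads have done so far: for each thread, the callback copies that
// thread's current epoch (or its last epoch if it is no longer running)
// into the acquiring thread's vector clock.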
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
    tctx->thr->clock.NoteGlobalAcquire(epoch);
  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

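// Combined release-store + acquire: stores the current thread's clock into
// the sync object while simultaneously acquiring the clock's previous
// contents (effectively an atomic exchange of clocks).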
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreAcquireImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

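// AfterSleep snapshots every thread's current epoch into last_sleep_clock.
// This is used by race reports to point out accesses that would have been
// ordered had the sleep been a real synchronization point ("as if
// synchronized via sleep").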
#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

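// The *Impl functions below operate directly on a SyncClock; callers are
// expected to hold the owning SyncVar's mutex (or otherwise have exclusive
// access to the clock).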
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncReleaseStoreAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

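// Builds and emits a deadlock report from the lock-order cycle found by the
// deadlock detector: one mutex and thread per edge of the cycle, plus the
// lock acquisition stacks where available.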
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan