//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause lots of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}

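// Write-lock acquisition. On the first (non-recursive) acquisition the thread
// acquires both the mutex's write clock and its read clock, i.e. it
// synchronizes with all previous write- and read-lock holders.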
void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else {
    Printf("ThreadSanitizer WARNING: double lock\n");
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion++;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  s->mtx.Unlock();
}

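// Write-lock release. On the final unlock the thread's vector clock is
// release-stored into the mutex's write clock for subsequent acquirers.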
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
      PrintCurrentStack(thr, pc);
    }
  } else {
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  s->mtx.Unlock();
}

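// Read-lock acquisition. Readers acquire only the mutex's write clock, so they
// synchronize with previous write-lock holders but not with other readers.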
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  s->mtx.ReadUnlock();
}

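// Read-lock release. The thread's clock is released into the separate
// read_clock, which only subsequent write-lock holders acquire (see MutexLock).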
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write "
           "locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
  thr->mset.Del(s->GetId(), false);
}

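// Unlock for callers that do not know whether they hold the mutex for reading
// or for writing (e.g. rwlock-style unlock interceptors); the recorded owner
// is used to guess which kind of unlock this is.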
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be a read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
    StatInc(thr, StatSyncRelease);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be a write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      // FIXME: Refactor me, please.
      // The sequence of events is quite tricky and duplicated in several
      // places. First, it's a bug to increment the epoch w/o writing to the
      // trace. Then, the acquire/release logic can be factored out as well.
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
    PrintCurrentStack(thr, pc);
  }
  thr->mset.Del(s->GetId(), write);
  s->mtx.Unlock();
}

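// A possible shape for the refactoring requested in the FIXME above (a sketch
// only; these helpers do not exist in this file): the duplicated
// set-clock / bump-fast_synch_epoch / release sequence could be collected as
//
//   static void ReleaseImpl(ThreadState *thr, SyncClock *c) {
//     thr->clock.set(thr->tid, thr->fast_state.epoch());
//     thr->fast_synch_epoch = thr->fast_state.epoch();
//     thr->clock.release(c);
//     StatInc(thr, StatSyncRelease);
//   }
//
//   static void AcquireImpl(ThreadState *thr, SyncClock *c) {
//     thr->clock.set(thr->tid, thr->fast_state.epoch());
//     thr->clock.acquire(c);
//     StatInc(thr, StatSyncAcquire);
//   }
//
// with MutexUnlock, MutexReadUnlock and MutexReadOrWriteUnlock calling
// ReleaseImpl on the appropriate clock, and the lock paths calling AcquireImpl.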
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}

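// AcquireGlobal (via UpdateClockCallback) acquires the current epoch of every
// thread: running threads contribute their live epoch, finished threads their
// final epoch (epoch1). Effectively, the caller synchronizes with everything
// that has happened in the program so far.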
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.release(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

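// AfterSleep (excluded from the Go runtime) records the current stack and a
// snapshot of every thread's epoch in last_sleep_clock, so that race reports
// can note that this thread was asleep when the racing access happened.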
#ifndef TSAN_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

}  // namespace __tsan