//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

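// Registers creation of a mutex at addr: records the rw/recursive/linker_init
// attributes on the corresponding SyncVar.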
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr))
    MemoryWrite1Byte(thr, pc, addr);
  SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

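// Handles mutex destruction: removes the SyncVar and, if the mutex is still
// locked, reports ReportTypeMutexDestroyLocked (unless already marked broken).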
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr))
    MemoryWrite1Byte(thr, pc, addr);
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(rep);
  }
  DestroyAndFree(s);
}

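// Models acquisition of a write lock: traces the event and, when the lock is
// taken non-recursively, acquires both the mutex's write and read clocks into
// the thread's vector clock. Locking a mutex owned by another thread triggers
// a "double lock" warning.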
void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else {
    TsanPrintf("ThreadSanitizer WARNING: double lock\n");
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion++;
  s->mtx.Unlock();
}

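// Models release of a write lock: decrements the recursion count and, when it
// drops to zero, release-stores the thread's clock into the mutex's sync
// clock. Unlocking an unlocked mutex or one owned by another thread is
// reported once per mutex.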
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;
      TsanPrintf("ThreadSanitizer WARNING: unlock of unlocked mutex\n");
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
      PrintCurrentStack(thr, pc);
    }
  } else {
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  s->mtx.Unlock();
}

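// Models acquisition of a read (shared) lock: acquires only the mutex's write
// clock. Read-locking a write-locked mutex produces a warning.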
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    TsanPrintf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}

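// Models release of a read (shared) lock: releases the thread's clock into the
// mutex's read_clock. Read-unlocking a write-locked mutex produces a warning.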
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid != SyncVar::kInvalidTid) {
    TsanPrintf("ThreadSanitizer WARNING: read unlock of a write "
               "locked mutex\n");
    PrintCurrentStack(thr, pc);
  }
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

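// Handles an unlock when the caller does not know whether the mutex was read-
// or write-locked: infers the mode from owner_tid and performs the
// corresponding release.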
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryRead1Byte(thr, pc, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be a read unlock.
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&s->read_clock);
    StatInc(thr, StatSyncRelease);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be a write unlock.
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      // FIXME: Refactor me, plz.
      // The sequence of events is quite tricky and duplicated in several
      // places. First, it's a bug to increment the epoch w/o writing to
      // the trace. Then, the acquire/release logic can be factored out as well.
      thr->fast_state.IncrementEpoch();
      TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    TsanPrintf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
    PrintCurrentStack(thr, pc);
  }
  s->mtx.Unlock();
}

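// Acquire: merges the sync object's clock for addr into the current thread's
// vector clock.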
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  StatInc(thr, StatSyncAcquire);
  s->mtx.ReadUnlock();
}

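// Release: merges the current thread's vector clock into the sync object's
// clock for addr, to be picked up by a subsequent Acquire.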
void Release(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.release(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

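// ReleaseStore: like Release, but uses ThreadClock::ReleaseStore, which stores
// the thread's clock into the sync object's clock instead of merging.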
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
}

#ifndef TSAN_GO
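// Called after a sleep-like call returns: records the sleep stack and
// snapshots each thread's current epoch into thr->last_sleep_clock.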
void AfterSleep(ThreadState *thr, uptr pc) {
  Context *ctx = CTX();
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    if (tctx->status == ThreadStatusRunning)
      thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch());
    else
      thr->last_sleep_clock.set(i, tctx->epoch1);
  }
}
#endif

}  // namespace __tsan