//===-- tsan_mutex.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table defines which mutexes can be locked under which other mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// The leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
  /*12 MutexTypeFired*/       {MutexTypeLeaf},
  /*13 MutexTypeRacy*/        {MutexTypeLeaf},
  /*14 MutexTypeGlobalProc*/  {},
};
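// For example, the Report row permits taking the SyncVar, MBlock and
// JavaMBlock mutexes while the Report mutex is held. Rows marked
// MutexTypeLeaf (Trace, Slab, Fired, Racy) may be locked under any non-leaf
// mutex but must not have other mutexes locked under them.
// InitializeMutex() below expands this table into the CanLockAdj adjacency
// matrix and verifies that the resulting lock-order graph is acyclic.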

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif

void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}

InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

#if SANITIZER_DEBUG && !SANITIZER_GO
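// Lock(t) is called (in debug builds) right before this thread acquires an
// internal mutex of type t: it records the acquisition order via seq_ and
// checks in CanLockAdj that t may be taken under the most recently acquired
// mutex that is still held.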
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}

void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}

void InternalDeadlockDetector::CheckNoLocks() {
  for (int i = 0; i != MutexTypeCount; i++) {
    CHECK_EQ(locked_[i], 0);
  }
}
#endif

void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
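// state_ encoding: kWriteLock (bit 0) is set while a writer holds the mutex;
// each reader adds kReadLock (2), so the remaining bits hold the reader count.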

class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  // Spin actively (proc_yield) for the first kActiveSpinIters calls, then
  // fall back to yielding the CPU via internal_sched_yield.
  bool Do() {
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  // Rough measure of how long we waited; passive yields are weighted 10x.
  u64 Contention() const {
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

void Mutex::Lock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::ReadLock() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS && !SANITIZER_GO
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if SANITIZER_DEBUG && !SANITIZER_GO
  cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}

void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}
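
// A minimal usage sketch (illustrative only; StatMtxReport is assumed to be
// a StatType enumerator defined elsewhere in the runtime):
//   Mutex mtx(MutexTypeReport, StatMtxReport);
//   mtx.Lock();        // exclusive (writer) section
//   mtx.Unlock();
//   mtx.ReadLock();    // shared (reader) section
//   mtx.ReadUnlock();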

}  // namespace __tsan