//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

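// A mutex id together with the sequence number observed for it; a seq
// mismatch indicates that the numeric id has since been recycled by
// MutexDestroy.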
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

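// A directed edge of the mutex graph: thread tid acquired mutex id while
// holding some other mutex. seq snapshots the target mutex's sequence number
// so stale edges can be recognized; stk0/stk1 are the stack ids of the two
// acquisitions, kept for reporting.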
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

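// Scratch state of a physical (OS) thread: the report being built plus the
// visited set, DFS worklist and current path used by CycleCheck.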
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

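// A mutex currently held by a logical thread; stk is the acquisition stack,
// recorded only when the second_deadlock_stack flag is set.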
struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

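// Global state of a mutex: an adjacency list of at most kMaxLink outgoing
// edges, guarded by its own spin lock. seq is incremented on destruction to
// invalidate links that still point at this (recycled) slot.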
struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

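// The detector implementation. Mutex descriptors live in the two-level
// mutex[] table; mtx guards table growth and the free_id cache.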
struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags)
    , free_id(1024) {
  id_gen = 0;
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

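// Mutex descriptors are kept in a two-level table: kL1Size pointers to lazily
// mmapped arrays of kL2Size descriptors each, so an id maps to
// mutex[id / kL2Size][id % kL2Size] and entries never move once created.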
Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

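// Returns a recycled id from the free_id cache if one is available; otherwise
// mints a fresh one, mapping a new second-level table whenever a kL2Size
// boundary is crossed.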
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

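  // Add or refresh a link id1 -> m->id for every mutex id1 this thread
  // already holds; a link whose stored seq does not match the target's
  // current seq is stale (the target id was recycled) and is overwritten.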
  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

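  // A blocking lock was already pushed onto lt->locked by MutexBeforeLock;
  // only a successful trylock is recorded here (trylock callers are expected
  // not to invoke MutexBeforeLock, since a trylock cannot block).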
  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

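// Iterative depth-first search for a cycle through m. pt->pending acts as the
// DFS stack; a sentinel Link(kEndId) pushed beneath a node's children pops
// that node off pt->path once its subtree is exhausted. Links whose seq no
// longer matches the target mutex are stale and skipped. Reaching m->id again
// means pt->path holds a lock-order cycle.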
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

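// Converts the discovered cycle into a DDReport: entry i records that thread
// thr_ctx acquired mutex mtx_ctx1 while holding mtx_ctx0, the previous mutex
// in the cycle (wrapping around at the start of the path).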
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2