//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
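//
// Overview: a lock-order edge mu1 -> mu2 is recorded in MutexBeforeLock
// whenever a thread that already holds mu1 is about to acquire mu2.
// CycleCheck then walks the adjacency lists depth-first; a path leading
// back to mu2 closes a cycle and is turned into a deadlock report.
//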

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

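// Tunable limits: a thread may hold at most kMaxNesting mutexes at once,
// each mutex keeps at most kMaxLink outgoing edges, and mutex descriptors
// live in a two-level kL1Size x kL2Size table (kMaxMutex ids in total).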
const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

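// A mutex id paired with a sequence number identifying one incarnation of
// that id (the seq is bumped whenever the id is recycled in MutexDestroy).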
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

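// A directed edge to mutex 'id' in the lock-order graph: 'seq' is the
// destination's sequence number at edge creation (mismatching edges are
// stale and skipped), 'tid' is the locking thread, and 'stk0'/'stk1' are
// the stacks of the first and second acquisition that formed the edge.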
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

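// Per-physical-thread scratch state: the pending report plus the visited
// set, work stack and current path used by CycleCheck's DFS.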
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

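// A currently held mutex: its id and (optionally) the acquisition stack.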
struct ThreadMutex {
  u32 id;
  u32 stk;
};

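// Per-logical-thread state: the user context and the stack of held mutexes.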
struct DDLogicalThread {
  u64         ctx;
  ThreadMutex locked[kMaxNesting];
  int         nlocked;
};

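// A mutex descriptor: 'mtx' protects the adjacency list 'link', and 'seq'
// is incremented on destruction to invalidate edges pointing at this id.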
struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

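// The detector itself: the two-level mutex descriptor table plus an id
// allocator with a free list, both guarded by 'mtx'.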
struct DD final : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

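// A minimal usage sketch, not part of this file: the embedding tool (e.g.
// TSan) supplies the DDCallback 'cb' (with cb->pt/cb->lt set) and a DDMutex
// 'm'; 'thread_ctx' and 'PrintDeadlock' below are hypothetical names.
//
//   DDetector *dd = DDetector::Create(&flags);
//   DDPhysicalThread *pt = dd->CreatePhysicalThread();
//   DDLogicalThread *lt = dd->CreateLogicalThread(thread_ctx);
//   dd->MutexInit(cb, m);
//   dd->MutexBeforeLock(cb, m, /*wlock=*/true);
//   dd->MutexAfterLock(cb, m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = dd->GetReport(cb))
//     PrintDeadlock(rep);  // hypothetical reporting hook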
DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

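// Maps an id to its descriptor in the two-level table: the high part of the
// id selects the L1 slot, the low part indexes into the L2 block.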
Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

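// Returns a recycled id from the free list if one is available; otherwise
// takes the next fresh id, mapping a new L2 block of descriptors whenever
// the generator crosses a kL2Size boundary.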
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

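// Records m in the thread's held-mutex list and adds an edge from every
// mutex the thread already holds to m (refreshing an existing edge if the
// destination's seq has changed). If any edge was added and m itself has
// outgoing edges, runs CycleCheck to look for a lock-order cycle.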
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

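// Updates owner/recursion bookkeeping for write locks. For a trylock the
// mutex is appended to the held-mutex list only here, once the acquisition
// has succeeded; for a blocking lock MutexBeforeLock has already recorded it.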
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

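// Drops recursion/owner state and removes the mutex from the thread's
// held-mutex list (swapping the last element into its slot).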
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

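// Iterative DFS over the lock-order graph starting from m's outgoing edges.
// A kEndId sentinel pushed after each expanded node pops one element off the
// current path; edges whose seq no longer matches the destination mutex are
// stale and skipped. Reaching m->id again closes a cycle, which is reported.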
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

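// Copies the detected cycle into the per-physical-thread report; each loop
// entry pairs a mutex with its predecessor on the path (wrapping around).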
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2