//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;               // Max simultaneously held mutexes per thread.
const u32 kNoId = -1;                     // The mutex has not been assigned an id yet.
const u32 kEndId = -2;                    // Path-level marker on CycleCheck's DFS stack.
const int kMaxLink = 8;                   // Max outgoing edges per mutex.
const int kL1Size = 1024;                 // First level of the mutex table.
const int kL2Size = 1024;                 // Second level of the mutex table.
const int kMaxMutex = kL1Size * kL2Size;  // Max mutexes the detector can track.

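// A mutex id paired with the generation (seq) at which it was observed, so a
// recycled id can be told apart from the original.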
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

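// An edge in the mutex acquisition graph: the target mutex 'id' was acquired
// while the source mutex (the owner of this Link) was held. 'seq' snapshots
// the target's generation so edges through destroyed mutexes can be skipped;
// 'tid' and the two stacks record which thread created the edge and where.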
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

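// Per-physical-thread state: scratch buffers for the cycle search plus a slot
// for the report produced when a cycle is found.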
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

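// A mutex currently held by a thread: its id and, if the second_deadlock_stack
// flag is set, the stack at which it was acquired.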
struct ThreadMutex {
  u32 id;
  u32 stk;
};

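// Per-logical-thread state: the user context and the currently held mutexes.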
struct DDLogicalThread {
  u64         ctx;
  ThreadMutex locked[kMaxNesting];
  int         nlocked;
};

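// Per-mutex state: a spinlock protecting the adjacency list, a generation
// counter bumped on destruction (which invalidates old edges), and up to
// kMaxLink outgoing edges.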
struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

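// The detector itself: a two-level table of Mutex descriptors and an id
// allocator with a free list, behind the DDetector interface.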
struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex* mutex[kL1Size];  // Two-level mutex table; L2 blocks mapped on demand.

  SpinMutex mtx;  // Protects free_id and id_gen.
  InternalMmapVector<u32> free_id;
  int id_gen;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags)
    , free_id(1024) {
  id_gen = 0;
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

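// Maps a mutex id to its descriptor in the two-level table.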
Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

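// Reverse mapping from a descriptor back to its id (linear scan over the
// first-level table); returns -1 (kNoId) if the descriptor is not found.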
u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

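// Returns a mutex id, either recycled from the free list or freshly
// allocated; a new second-level table is mapped whenever id_gen crosses a
// kL2Size boundary.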
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LT(id, kMaxMutex);  // Valid ids are 0..kMaxMutex-1.
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

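// Called before a blocking acquisition of m: records an edge from every mutex
// the thread already holds to m and, if any new edge appeared, searches for a
// cycle through m.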
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LT(lt->nlocked, kMaxNesting);  // An entry is appended below.

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  // Add or refresh an edge from each previously held mutex to m.
  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          // The edge refers to a previous incarnation of m; refresh it.
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  // Only new edges can introduce a cycle, and only if m has successors.
  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

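// Called after m is acquired: tracks ownership and recursion for write locks.
// Trylocks bypass MutexBeforeLock, so a successful trylock registers m as
// held here instead.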
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  // For a blocking lock, MutexBeforeLock has already recorded m as held.
  if (!trylock)
    return;

  CHECK_LT(lt->nlocked, kMaxNesting);  // An entry is appended below.
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

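// Called before releasing m: unwinds write-lock recursion and removes m from
// the thread's held-mutex list (swap-with-last removal).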
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

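// Called on mutex destruction: forgets m if it is still in the held list,
// invalidates every edge through m by bumping its generation, and returns
// its id to the free list.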
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

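// Iterative DFS over the mutex graph starting from m's successors. 'pending'
// is an explicit DFS stack in which kEndId entries mark the end of a node's
// successor list; popping one retreats the current path by one hop. Edges
// whose seq no longer matches the target's generation are stale and skipped.
// Reaching m->id again means the newly added edges closed a cycle.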
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    // Seed the stack with m's outgoing edges.
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;  // This node's successors are exhausted; step back.
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;  // Stale edge: the target mutex was destroyed.
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;  // Leaf node: cannot be part of a cycle.
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

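// Copies the discovered cycle from pt->path into the per-thread report; each
// loop entry pairs a thread with the mutex it held and the one it acquired.
// The report is delivered via the next GetReport call on this physical thread.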
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2