//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//

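// The detector maintains a directed graph over mutexes: an edge M1 -> M2 is
// recorded when some thread acquires M2 while already holding M1. A cycle in
// this graph indicates a potential deadlock. For example, if thread T1 locks
// A then B while thread T2 locks B then A, the edges A->B and B->A form a
// cycle and a report is produced on the acquisition that closes it.
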
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

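// Versioned reference to a mutex: 'id' indexes the two-level mutex table,
// 'seq' is the generation of the mutex at the time the reference was taken.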
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

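// A directed edge in the mutex graph, stored in the source mutex's adjacency
// list: mutex 'id' was acquired while the source mutex was held. 'seq'
// snapshots the target mutex's generation, so edges to a destroyed (and
// possibly reused) mutex can be recognized as stale; 'tid' and stk0/stk1
// record the thread and the two acquisition stacks for reporting.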
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

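// Per-OS-thread scratch state: the pending report plus the visited set,
// work stack and current path used by CycleCheck's depth-first search.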
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

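// Per-mutex state: the adjacency list of outgoing edges, guarded by its own
// spin lock. 'seq' is a generation counter bumped on mutex destruction so
// that stale edges pointing at a reused id can be detected and ignored.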
struct MutexState {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

struct DD final : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  MutexState *getMutex(u32 id);
  u32 getMutexId(MutexState *m);

  DDFlags flags;

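  // Two-level table of mutex descriptors; L2 blocks of kL2Size entries are
  // mapped lazily by allocateId as ids are handed out.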
  MutexState *mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

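// Maps a dense mutex id to its descriptor in the two-level table, and back.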
MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }

u32 DD::getMutexId(MutexState *m) {
  for (int i = 0; i < kL1Size; i++) {
    MutexState *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

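// Returns a mutex id, reusing one from the free list if possible; otherwise
// takes the next sequential id, mapping a fresh L2 block when crossing a
// kL2Size boundary.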
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
          kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LT(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

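  // For every mutex already held by this thread, add (or refresh) the edge
  // held-mutex -> m in the holder's adjacency list. Only if a new edge was
  // added (and m itself has outgoing edges) can a new cycle have appeared.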
  bool added = false;
  MutexState *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    MutexState *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

265
266void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
267 bool trylock) {
268 VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
269 cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
270 DDLogicalThread *lt = cb->lt;
271
272 uptr owner = atomic_load(&m->owner, memory_order_relaxed);
273 if (owner == (uptr)cb->lt) {
274 VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
275 CHECK(wlock);
276 m->recursion++;
277 return;
278 }
279 CHECK_EQ(owner, 0);
280 if (wlock) {
281 VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
282 CHECK_EQ(m->recursion, 0);
283 m->recursion = 1;
284 atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
285 }
286
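  // Trylock acquisitions do not go through MutexBeforeLock, so record the
  // now-held mutex here instead; no cycle check is needed because a failed
  // trylock cannot block.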
  if (!trylock)
    return;

  CHECK_LT(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

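// Destroying a mutex invalidates all edges that reference it in one step:
// bumping 'seq' makes existing links (which captured the old generation)
// stale, and the id is recycled via the free list.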
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

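// Iterative depth-first search for a cycle through m. 'pending' is the
// explicit DFS stack and 'path' the chain of edges currently being explored;
// a sentinel kEndId entry is pushed before a node's successors so that the
// path is popped once that node's subtree has been exhausted. Edges whose
// 'seq' no longer matches the target mutex's generation are stale and
// skipped.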
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
                    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    MutexState *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // MutexState *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

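// Copies the cycle found by CycleCheck into the pending report. path[i]
// holds the edge entering the i-th mutex of the loop, so the previous entry
// (wrapping around) supplies the source mutex of each edge.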
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
