//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_sync.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
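
// For reference (an illustrative sketch, not part of the runtime): a test or
// an embedding application can observe or suppress reports by providing a
// strong definition of the hooks above, assuming it has access to the
// internal ReportDesc type (e.g. in a TSAN_EXTERNAL_HOOKS build):
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     // Record rep->typ somewhere; returning true suppresses printing.
//     return suppressed;
//   }
//   }  // namespace __tsan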

// Strips runtime/startup frames from the bottom of a symbolized stack so that
// reported stacks end at user code (e.g. the caller of main such as
// __libc_start_main, or our internal __tsan_thread_start_func).
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missing\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
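    // Note: PCs with kExternalPCBit set are external tags rather than real
    // return addresses, so they are intentionally left unadjusted.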
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locks in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check that we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}
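
// Note: the report_* flags consulted above are ordinary runtime flags, so
// they can be toggled via TSAN_OPTIONS; an illustrative invocation:
//   TSAN_OPTIONS="report_thread_leaks=0:report_signal_unsafe=0" ./app
// See tsan_flags.inc for the authoritative list of flags and defaults.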

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry.FindThreadContextLocked(IsInStackOrTls,
                                                   (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack = SymbolizeStackId(creation_stack_id);
  return rm->id;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  bool closed = false;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd_closed = closed;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    AddThread(creat_tid);
    return;
  }
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    AddThread(b->tid);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}
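
// Typical report construction (a sketch of how the classes above are used;
// ReportRace() below is the real driver):
//   ThreadRegistryLock l(&ctx->thread_registry);  // ctor CHECKs this lock
//   ScopedReport rep(ReportTypeRace, tag);
//   rep.AddMemoryAccess(...);
//   rep.AddThread(...);
//   rep.AddLocation(...);
//   OutputReport(thr, rep);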

// Replays the trace up to the last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
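// The callback is first invoked as f(kFreeSid, kEpochOver, nullptr) at the
// start of every trace part (so that the caller can reset per-part state),
// and then as f(sid, epoch, event) for each event in that part.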
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}

static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2(" MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
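// For example (illustrative numbers): an 8-byte access traced at 0x100 may be
// recorded in the shadow as two 4-byte accesses at 0x100 and 0x104; searching
// for the 4-byte piece at 0x104 still matches the traced event because
// [0x104, 0x108) is fully contained in [0x100, 0x108).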
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2(" journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part;
            // if the stack was truncated, we can have more func exits than
            // entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of a trace
            // part as the initial mutex set (they are not real events).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

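// Deduplicates reports that involve an already-reported pair of stacks
// (controlled by the suppress_equal_stacks flag). The lookup is done first
// under a read lock and repeated under the write lock before inserting, so
// the common already-seen case does not serialize reporting threads.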
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

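// OutputReport() also matches the report against user-provided suppressions
// (TSAN_OPTIONS=suppressions=/path/to/file). Illustrative file contents,
// using the standard sanitizer suppression syntax:
//   race:SomeRacyFunctionOrFile
//   thread:SomeThreadLeakPattern
//   called_from_lib:libexample.so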
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here; we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

static bool SpuriousRace(Shadow old) {
  Shadow last(LoadShadow(&ctx->last_spurious_race));
  return last.sid() == old.sid() && last.epoch() == old.epoch();
}

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;
  if (SpuriousRace(old))
    return;

  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  if (SpuriousRace(old))
    return;
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
    StoreShadow(&ctx->last_spurious_race, old.raw());
    return;
  }

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

  if (flags()->print_full_thread_history) {
    const ReportDesc *rep_desc = rep.GetReport();
    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
      Tid parent_tid = rep_desc->threads[i]->parent_tid;
      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
        continue;
      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
          ctx->thread_registry.GetThreadLocked(parent_tid));
      rep.AddThread(parent_tctx);
    }
  }

#if !SANITIZER_GO
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif
  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough; please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);

  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

} // namespace __tsan

using namespace __tsan;

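// __sanitizer_print_stack_trace is part of the public sanitizer interface
// (declared in <sanitizer/common_interface_defs.h>), so user code can call it
// directly to print the current stack through the TSan unwinder and
// symbolizer. A minimal, illustrative use:
//   #include <sanitizer/common_interface_defs.h>
//   void DumpStackHere() { __sanitizer_print_stack_trace(); }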
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
} // extern "C"