//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
# if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
# endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
# if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
# endif
}
#endif

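// Allocates a trace part for the calling thread. Once the thread has reached
// its per-thread part limit (or the global excess counter is non-zero), it
// first tries to recycle a part from ctx->trace_part_recycle, detaching it
// from its previous owner; if nothing can be recycled, a fresh part is mmaped.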
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

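// Detaches the part from its owning trace and returns it to the global
// recycle list.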
static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

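// Performs a global state reset: bumps the global epoch, trims per-thread
// traces, re-initializes all TID slots and re-mmaps the shadow region.
// Called with all slot mutexes held (see DoReset below).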
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow memory, they can't race with
    // anything anymore. However, their tids can still be stored in some
    // aux places (e.g. the tid of the thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
//   error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

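// Picks the next usable slot from the global slot queue (rotating it to the
// back) and returns it locked. If every slot is exhausted (at kEpochLast),
// performs DoReset and retries. A slot still attached to another thread is
// preempted: the owner's current epoch is saved and the owner is detached.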
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

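// Attaches the thread to a fresh slot (locked on return): bumps the slot
// epoch, updates the thread's fast state and clock, and records the
// (tid, epoch) pair in the slot journal. The thread's clock is reset if a
// global reset happened since the thread last held a slot.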
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

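// Detaches the thread from its slot. If the slot has already been preempted
// by another thread, only stale thread-local trace state left over from a
// global reset is cleaned up; otherwise the thread's current epoch is saved
// back into the slot.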
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread was preempted right after it acquired the slot
        // in ThreadStart and has not traced any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

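// Locks the thread's current slot. If the slot was preempted by another
// thread or its epoch is exhausted, the thread is detached and re-attached
// to a fresh slot.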
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
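// Writes a memory profile snapshot to the file configured by
// flags()->profile_memory (opened in InitializeMemoryProfiler below).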
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at which point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try not to
// crash as the failure mode is very confusing (a page fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

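// Releases the shadow for an app range that is about to be munmapped and
// resets the corresponding meta shadow. No-op for empty or invalid ranges
// (see the comment above IsValidMmapRange).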
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

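// Maps shadow and meta shadow for a newly mapped app range [addr, addr+size).
// The first call covers data+bss; subsequent calls extend the mapping for the
// heap, avoiding re-mapping ranges that are already covered.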
void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

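// One-time runtime initialization: parses flags, sets up the allocator,
// interceptors, shadow memory and suppressions, and starts the main thread.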
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
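// Called before fork(): detaches the current thread from its slot and locks
// all runtime mutexes so that the child inherits a consistent state.
// Reports and interceptors are suppressed until ForkParentAfter/ForkChildAfter.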
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

  __tsan_test_only_on_fork();
}

static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlock();
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

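// Returns a stack depot id for the current shadow stack, optionally with the
// given pc pushed on top as the topmost frame.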
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

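// Handles a false positive of the fast-path end-of-part check in TraceAcquire:
// if there is still room in the current part, skips the alignment gap with
// NopEvents and returns true; otherwise pads the part to its end and returns
// false so the caller switches to a new part.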
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check whether we are indeed at the end of the current part,
  // and fill any gaps with NopEvents.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

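// Allocates and installs a new trace part, then makes it self-sufficient for
// report restoration by replaying the current timestamp, shadow stack and
// mutex set at its beginning.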
void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may be not queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and the epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock at the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it became kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

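// ThreadIgnoreBegin/End maintain a per-thread counter of nested "ignore reads
// and writes" regions; memory accesses are not tracked while it is non-zero.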
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif