1//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of ThreadSanitizer (TSan), a race detector.
10//
11// FIXME: move as many interceptors as possible into
12// sanitizer_common/sanitizer_common_interceptors.inc
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_common/sanitizer_atomic.h"
16#include "sanitizer_common/sanitizer_errno.h"
17#include "sanitizer_common/sanitizer_libc.h"
18#include "sanitizer_common/sanitizer_linux.h"
19#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20#include "sanitizer_common/sanitizer_platform_limits_posix.h"
21#include "sanitizer_common/sanitizer_placement_new.h"
22#include "sanitizer_common/sanitizer_posix.h"
23#include "sanitizer_common/sanitizer_stacktrace.h"
24#include "sanitizer_common/sanitizer_tls_get_addr.h"
25#include "interception/interception.h"
26#include "tsan_interceptors.h"
27#include "tsan_interface.h"
28#include "tsan_platform.h"
29#include "tsan_suppressions.h"
30#include "tsan_rtl.h"
31#include "tsan_mman.h"
32#include "tsan_fd.h"
33
34#include <stdarg.h>
35
36using namespace __tsan;
37
38DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
39DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
40
41#if SANITIZER_FREEBSD || SANITIZER_APPLE
42#define stdout __stdoutp
43#define stderr __stderrp
44#endif
45
46#if SANITIZER_NETBSD
47#define dirfd(dirp) (*(int *)(dirp))
48#define fileno_unlocked(fp) \
49 (((__sanitizer_FILE *)fp)->_file == -1 \
50 ? -1 \
51 : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
52
53#define stdout ((__sanitizer_FILE*)&__sF[1])
54#define stderr ((__sanitizer_FILE*)&__sF[2])
55
56#define nanosleep __nanosleep50
57#define vfork __vfork14
58#endif
59
60#ifdef __mips__
61const int kSigCount = 129;
62#else
63const int kSigCount = 65;
64#endif
65
66#ifdef __mips__
67struct ucontext_t {
68 u64 opaque[768 / sizeof(u64) + 1];
69};
70#else
71struct ucontext_t {
  // The size is determined by looking at the sizeof of the real ucontext_t on Linux.
73 u64 opaque[936 / sizeof(u64) + 1];
74};
75#endif
76
77#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
78 defined(__s390x__)
79#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
80#elif defined(__aarch64__) || SANITIZER_PPC64V2
81#define PTHREAD_ABI_BASE "GLIBC_2.17"
82#elif SANITIZER_LOONGARCH64
83#define PTHREAD_ABI_BASE "GLIBC_2.36"
84#elif SANITIZER_RISCV64
85# define PTHREAD_ABI_BASE "GLIBC_2.27"
86#endif
87
88extern "C" int pthread_attr_init(void *attr);
89extern "C" int pthread_attr_destroy(void *attr);
90DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
91extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
92extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
93 void (*child)(void));
94extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
95extern "C" int pthread_setspecific(unsigned key, const void *v);
96DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
97DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
98DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
99DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
100extern "C" int pthread_equal(void *t1, void *t2);
101extern "C" void *pthread_self();
102extern "C" void _exit(int status);
103#if !SANITIZER_NETBSD
104extern "C" int fileno_unlocked(void *stream);
105extern "C" int dirfd(void *dirp);
106#endif
107#if SANITIZER_NETBSD
108extern __sanitizer_FILE __sF[];
109#else
110extern __sanitizer_FILE *stdout, *stderr;
111#endif
112#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
113const int PTHREAD_MUTEX_RECURSIVE = 1;
114const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
115#else
116const int PTHREAD_MUTEX_RECURSIVE = 2;
117const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
118#endif
119#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
120const int EPOLL_CTL_ADD = 1;
121#endif
122const int SIGILL = 4;
123const int SIGTRAP = 5;
124const int SIGABRT = 6;
125const int SIGFPE = 8;
126const int SIGSEGV = 11;
127const int SIGPIPE = 13;
128const int SIGTERM = 15;
129#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
130const int SIGBUS = 10;
131const int SIGSYS = 12;
132#else
133const int SIGBUS = 7;
134const int SIGSYS = 31;
135#endif
136#if SANITIZER_HAS_SIGINFO
137const int SI_TIMER = -2;
138#endif
139void *const MAP_FAILED = (void*)-1;
140#if SANITIZER_NETBSD
141const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
142#elif !SANITIZER_APPLE
143const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
144#endif
145const int MAP_FIXED = 0x10;
146typedef long long_t;
147typedef __sanitizer::u16 mode_t;
148
149// From /usr/include/unistd.h
150# define F_ULOCK 0 /* Unlock a previously locked region. */
151# define F_LOCK 1 /* Lock a region for exclusive use. */
152# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
153# define F_TEST 3 /* Test a region for other processes locks. */
154
155#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
156const int SA_SIGINFO = 0x40;
157const int SIG_SETMASK = 3;
158#elif defined(__mips__)
159const int SA_SIGINFO = 8;
160const int SIG_SETMASK = 3;
161#else
162const int SA_SIGINFO = 4;
163const int SIG_SETMASK = 2;
164#endif
165
166namespace __tsan {
167struct SignalDesc {
168 bool armed;
169 __sanitizer_siginfo siginfo;
170 ucontext_t ctx;
171};
172
173struct ThreadSignalContext {
174 int int_signal_send;
175 SignalDesc pending_signals[kSigCount];
176 // emptyset and oldset are too big for stack.
177 __sanitizer_sigset_t emptyset;
178 __sanitizer_sigset_t oldset;
179};
180
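// Marks the thread as being inside a blocking libc call. While
// thr->in_blocking_func is set, incoming signals are handled right away in the
// signal handler instead of being queued as pending (see BlockingCall below).
// The retry loop closes the window where a signal is delivered just before the
// flag is set and would otherwise stay pending for an arbitrarily long time.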
181void EnterBlockingFunc(ThreadState *thr) {
182 for (;;) {
183 // The order is important to not delay a signal infinitely if it's
184 // delivered right before we set in_blocking_func. Note: we can't call
185 // ProcessPendingSignals when in_blocking_func is set, or we can handle
186 // a signal synchronously when we are already handling a signal.
187 atomic_store(a: &thr->in_blocking_func, v: 1, mo: memory_order_relaxed);
188 if (atomic_load(a: &thr->pending_signals, mo: memory_order_relaxed) == 0)
189 break;
190 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
191 ProcessPendingSignals(thr);
192 }
193}
194
195// The sole reason tsan wraps atexit callbacks is to establish synchronization
196// between callback setup and callback execution.
197struct AtExitCtx {
198 void (*f)();
199 void *arg;
200 uptr pc;
201};
202
203// InterceptorContext holds all global data required for interceptors.
204// It's explicitly constructed in InitializeInterceptors with placement new
205// and is never destroyed. This allows usage of members with non-trivial
206// constructors and destructors.
207struct InterceptorContext {
208 // The object is 64-byte aligned, because we want hot data to be located
209 // in a single cache line if possible (it's accessed in every interceptor).
210 ALIGNED(64) LibIgnore libignore;
211 __sanitizer_sigaction sigactions[kSigCount];
212#if !SANITIZER_APPLE && !SANITIZER_NETBSD
213 unsigned finalize_key;
214#endif
215
216 Mutex atexit_mu;
217 Vector<struct AtExitCtx *> AtExitStack;
218
219 InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
220};
221
222static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
223InterceptorContext *interceptor_ctx() {
224 return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
225}
226
227LibIgnore *libignore() {
228 return &interceptor_ctx()->libignore;
229}
230
231void InitializeLibIgnore() {
232 const SuppressionContext &supp = *Suppressions();
233 const uptr n = supp.SuppressionCount();
234 for (uptr i = 0; i < n; i++) {
235 const Suppression *s = supp.SuppressionAt(i);
236 if (0 == internal_strcmp(s1: s->type, s2: kSuppressionLib))
237 libignore()->AddIgnoredLibrary(name_templ: s->templ);
238 }
239 if (flags()->ignore_noninstrumented_modules)
240 libignore()->IgnoreNoninstrumentedModules(enable: true);
241 libignore()->OnLibraryLoaded(name: 0);
242}
243
// The following two hooks can be used for cooperative scheduling when
// locking.
246#ifdef TSAN_EXTERNAL_HOOKS
247void OnPotentiallyBlockingRegionBegin();
248void OnPotentiallyBlockingRegionEnd();
249#else
250SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
251SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
252#endif
253
254} // namespace __tsan
255
256static ThreadSignalContext *SigCtx(ThreadState *thr) {
257 // This function may be called reentrantly if it is interrupted by a signal
258 // handler. Use CAS to handle the race.
259 uptr ctx = atomic_load(a: &thr->signal_ctx, mo: memory_order_relaxed);
260 if (ctx == 0 && !thr->is_dead) {
261 uptr pctx =
262 (uptr)MmapOrDie(size: sizeof(ThreadSignalContext), mem_type: "ThreadSignalContext");
263 MemoryResetRange(thr, pc: (uptr)&SigCtx, addr: pctx, size: sizeof(ThreadSignalContext));
264 if (atomic_compare_exchange_strong(a: &thr->signal_ctx, cmp: &ctx, xchg: pctx,
265 mo: memory_order_relaxed)) {
266 ctx = pctx;
267 } else {
268 UnmapOrDie(addr: (ThreadSignalContext *)pctx, size: sizeof(ThreadSignalContext));
269 }
270 }
271 return (ThreadSignalContext *)ctx;
272}
273
274ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
275 uptr pc)
276 : thr_(thr) {
277 LazyInitialize(thr);
278 if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
279 // pthread_join is marked as blocking, but it's also known to call other
280 // intercepted functions (mmap, free). If we don't reset in_blocking_func
281 // we can get deadlocks and memory corruptions if we deliver a synchronous
282 // signal inside of an mmap/free interceptor.
283 // So reset it and restore it back in the destructor.
284 // See https://github.com/google/sanitizers/issues/1540
285 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
286 in_blocking_func_ = true;
287 }
288 if (!thr_->is_inited) return;
289 if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
290 DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
291 ignoring_ =
292 !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
293 libignore()->IsIgnored(pc, pc_in_ignored_lib: &in_ignored_lib_));
294 EnableIgnores();
295}
296
297ScopedInterceptor::~ScopedInterceptor() {
298 if (!thr_->is_inited) return;
299 DisableIgnores();
300 if (UNLIKELY(in_blocking_func_))
301 EnterBlockingFunc(thr: thr_);
302 if (!thr_->ignore_interceptors) {
303 ProcessPendingSignals(thr: thr_);
304 FuncExit(thr: thr_);
305 CheckedMutex::CheckNoLocks();
306 }
307}
308
309NOINLINE
310void ScopedInterceptor::EnableIgnoresImpl() {
311 ThreadIgnoreBegin(thr: thr_, pc: 0);
312 if (flags()->ignore_noninstrumented_modules)
313 thr_->suppress_reports++;
314 if (in_ignored_lib_) {
315 DCHECK(!thr_->in_ignored_lib);
316 thr_->in_ignored_lib = true;
317 }
318}
319
320NOINLINE
321void ScopedInterceptor::DisableIgnoresImpl() {
322 ThreadIgnoreEnd(thr: thr_);
323 if (flags()->ignore_noninstrumented_modules)
324 thr_->suppress_reports--;
325 if (in_ignored_lib_) {
326 DCHECK(thr_->in_ignored_lib);
327 thr_->in_ignored_lib = false;
328 }
329}
330
331#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
332#if SANITIZER_FREEBSD || SANITIZER_NETBSD
333# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
334#else
335# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
336#endif
337#if SANITIZER_FREEBSD
338# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
339 INTERCEPT_FUNCTION(_pthread_##func)
340#else
341# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
342#endif
343#if SANITIZER_NETBSD
344# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
345 INTERCEPT_FUNCTION(__libc_##func)
346# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
347 INTERCEPT_FUNCTION(__libc_thr_##func)
348#else
349# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
350# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
351#endif
352
353#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
354 MemoryAccessRange((thr), (pc), (uptr)(s), \
355 common_flags()->strict_string_checks ? (len) + 1 : (n), false)
356
357#define READ_STRING(thr, pc, s, n) \
358 READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
359
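// In short: BLOCK_REAL(foo)(args...) expands to (BlockingCall(thr), REAL(foo))(args...).
// The BlockingCall temporary is kept alive until the end of the full
// expression, so the real (potentially blocking) call runs with asynchronous
// signal handling enabled and with interceptors ignored; both effects are
// undone in ~BlockingCall right after the call returns.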
360#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
361
362struct BlockingCall {
363 explicit BlockingCall(ThreadState *thr)
364 : thr(thr) {
365 EnterBlockingFunc(thr);
366 // When we are in a "blocking call", we process signals asynchronously
367 // (right when they arrive). In this context we do not expect to be
368 // executing any user/runtime code. The known interceptor sequence when
369 // this is not true is: pthread_join -> munmap(stack). It's fine
370 // to ignore munmap in this case -- we handle stack shadow separately.
371 thr->ignore_interceptors++;
372 }
373
374 ~BlockingCall() {
375 thr->ignore_interceptors--;
376 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
377 }
378
379 ThreadState *thr;
380};
381
382TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
383 SCOPED_TSAN_INTERCEPTOR(sleep, sec);
384 unsigned res = BLOCK_REAL(sleep)(sec);
385 AfterSleep(thr, pc);
386 return res;
387}
388
389TSAN_INTERCEPTOR(int, usleep, long_t usec) {
390 SCOPED_TSAN_INTERCEPTOR(usleep, usec);
391 int res = BLOCK_REAL(usleep)(usec);
392 AfterSleep(thr, pc);
393 return res;
394}
395
396TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
397 SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
398 int res = BLOCK_REAL(nanosleep)(req, rem);
399 AfterSleep(thr, pc);
400 return res;
401}
402
403TSAN_INTERCEPTOR(int, pause, int fake) {
404 SCOPED_TSAN_INTERCEPTOR(pause, fake);
405 return BLOCK_REAL(pause)(fake);
406}
407
// Note: the callback is deliberately named with the "installed_at" suffix
// because in reports this frame appears between the callback frames and the
// frame that installed the callback.
411static void at_exit_callback_installed_at() {
412 AtExitCtx *ctx;
413 {
414 // Ensure thread-safety.
415 Lock l(&interceptor_ctx()->atexit_mu);
416
417 // Pop AtExitCtx from the top of the stack of callback functions
418 uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
419 ctx = interceptor_ctx()->AtExitStack[element];
420 interceptor_ctx()->AtExitStack.PopBack();
421 }
422
423 ThreadState *thr = cur_thread();
424 Acquire(thr, pc: ctx->pc, addr: (uptr)ctx);
425 FuncEntry(thr, pc: ctx->pc);
426 ((void(*)())ctx->f)();
427 FuncExit(thr);
428 Free(p&: ctx);
429}
430
431static void cxa_at_exit_callback_installed_at(void *arg) {
432 ThreadState *thr = cur_thread();
433 AtExitCtx *ctx = (AtExitCtx*)arg;
434 Acquire(thr, pc: ctx->pc, addr: (uptr)arg);
435 FuncEntry(thr, pc: ctx->pc);
436 ((void(*)(void *arg))ctx->f)(ctx->arg);
437 FuncExit(thr);
438 Free(p&: ctx);
439}
440
441static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
442 void *arg, void *dso);
443
444#if !SANITIZER_ANDROID
445TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
446 if (in_symbolizer())
447 return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
450 SCOPED_INTERCEPTOR_RAW(atexit, f);
451 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), f: (void (*)())f, arg: 0, dso: 0);
452}
453#endif
454
455TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
456 if (in_symbolizer())
457 return 0;
458 SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
459 return setup_at_exit_wrapper(thr, GET_CALLER_PC(), f: (void (*)())f, arg, dso);
460}
461
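// Synchronization sketch: setup_at_exit_wrapper() performs Release(thr, pc, ctx)
// at registration time, and the *_callback_installed_at() wrappers perform
// Acquire(thr, ctx->pc, ctx) before invoking the callback. This makes everything
// the registering thread did before atexit()/__cxa_atexit() happen-before the
// callback body.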
462static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
463 void *arg, void *dso) {
464 auto *ctx = New<AtExitCtx>();
465 ctx->f = f;
466 ctx->arg = arg;
467 ctx->pc = pc;
468 Release(thr, pc, addr: (uptr)ctx);
469 // Memory allocation in __cxa_atexit will race with free during exit,
470 // because we do not see synchronization around atexit callback list.
471 ThreadIgnoreBegin(thr, pc);
472 int res;
473 if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0, so
    // store ctx in a local stack-like structure instead.
476
477 // Ensure thread-safety.
478 Lock l(&interceptor_ctx()->atexit_mu);
479 // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
480 // due to atexit_mu held on exit from the calloc interceptor.
481 ScopedIgnoreInterceptors ignore;
482
483 res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
484 0, 0);
485 // Push AtExitCtx on the top of the stack of callback functions
486 if (!res) {
487 interceptor_ctx()->AtExitStack.PushBack(v: ctx);
488 }
489 } else {
490 res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
491 }
492 ThreadIgnoreEnd(thr);
493 return res;
494}
495
496#if !SANITIZER_APPLE && !SANITIZER_NETBSD
497static void on_exit_callback_installed_at(int status, void *arg) {
498 ThreadState *thr = cur_thread();
499 AtExitCtx *ctx = (AtExitCtx*)arg;
500 Acquire(thr, pc: ctx->pc, addr: (uptr)arg);
501 FuncEntry(thr, pc: ctx->pc);
502 ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
503 FuncExit(thr);
504 Free(p&: ctx);
505}
506
507TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
508 if (in_symbolizer())
509 return 0;
510 SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
511 auto *ctx = New<AtExitCtx>();
512 ctx->f = (void(*)())f;
513 ctx->arg = arg;
514 ctx->pc = GET_CALLER_PC();
515 Release(thr, pc, addr: (uptr)ctx);
516 // Memory allocation in __cxa_atexit will race with free during exit,
517 // because we do not see synchronization around atexit callback list.
518 ThreadIgnoreBegin(thr, pc);
519 int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
520 ThreadIgnoreEnd(thr);
521 return res;
522}
523#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
524#else
525#define TSAN_MAYBE_INTERCEPT_ON_EXIT
526#endif
527
528// Cleanup old bufs.
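// Removes all bufs saved at stack positions at or below the given sp
// (presumably frames the stack has already unwound past) by overwriting each
// with the last element and popping the vector; i is re-examined after the swap.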
529static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
530 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
531 JmpBuf *buf = &thr->jmp_bufs[i];
532 if (buf->sp <= sp) {
533 uptr sz = thr->jmp_bufs.Size();
534 internal_memcpy(dest: buf, src: &thr->jmp_bufs[sz - 1], n: sizeof(*buf));
535 thr->jmp_bufs.PopBack();
536 i--;
537 }
538 }
539}
540
541static void SetJmp(ThreadState *thr, uptr sp) {
542 if (!thr->is_inited) // called from libc guts during bootstrap
543 return;
544 // Cleanup old bufs.
545 JmpBufGarbageCollect(thr, sp);
546 // Remember the buf.
547 JmpBuf *buf = thr->jmp_bufs.PushBack();
548 buf->sp = sp;
549 buf->shadow_stack_pos = thr->shadow_stack_pos;
550 ThreadSignalContext *sctx = SigCtx(thr);
551 buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
552 buf->in_blocking_func = atomic_load(a: &thr->in_blocking_func, mo: memory_order_relaxed);
553 buf->in_signal_handler = atomic_load(a: &thr->in_signal_handler,
554 mo: memory_order_relaxed);
555}
556
557static void LongJmp(ThreadState *thr, uptr *env) {
558 uptr sp = ExtractLongJmpSp(env);
559 // Find the saved buf with matching sp.
560 for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
561 JmpBuf *buf = &thr->jmp_bufs[i];
562 if (buf->sp == sp) {
563 CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
564 // Unwind the stack.
565 while (thr->shadow_stack_pos > buf->shadow_stack_pos)
566 FuncExit(thr);
567 ThreadSignalContext *sctx = SigCtx(thr);
568 if (sctx)
569 sctx->int_signal_send = buf->int_signal_send;
570 atomic_store(a: &thr->in_blocking_func, v: buf->in_blocking_func,
571 mo: memory_order_relaxed);
572 atomic_store(a: &thr->in_signal_handler, v: buf->in_signal_handler,
573 mo: memory_order_relaxed);
574 JmpBufGarbageCollect(thr, sp: buf->sp - 1); // do not collect buf->sp
575 return;
576 }
577 }
578 Printf(format: "ThreadSanitizer: can't find longjmp buf\n");
579 CHECK(0);
580}
581
582// FIXME: put everything below into a common extern "C" block?
583extern "C" void __tsan_setjmp(uptr sp) { SetJmp(thr: cur_thread_init(), sp); }
584
585#if SANITIZER_APPLE
586TSAN_INTERCEPTOR(int, setjmp, void *env);
587TSAN_INTERCEPTOR(int, _setjmp, void *env);
588TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
589#else // SANITIZER_APPLE
590
591#if SANITIZER_NETBSD
592#define setjmp_symname __setjmp14
593#define sigsetjmp_symname __sigsetjmp14
594#else
595#define setjmp_symname setjmp
596#define sigsetjmp_symname sigsetjmp
597#endif
598
599DEFINE_REAL(int, setjmp_symname, void *env)
600DEFINE_REAL(int, _setjmp, void *env)
601DEFINE_REAL(int, sigsetjmp_symname, void *env)
602#if !SANITIZER_NETBSD
603DEFINE_REAL(int, __sigsetjmp, void *env)
604#endif
605
606// The real interceptor for setjmp is special, and implemented in pure asm. We
607// just need to initialize the REAL functions so that they can be used in asm.
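// Roughly: the assembly-implemented setjmp interceptors call __tsan_setjmp(sp)
// to record the current stack pointer (and shadow stack position) and then
// tail-call the REAL function resolved below.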
608static void InitializeSetjmpInterceptors() {
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
  // &setjmp and setjmp is not present in some versions of libc.
611 using __interception::InterceptFunction;
612 InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), ptr_to_real: (uptr*)&REAL(setjmp_symname), func: 0, trampoline: 0);
613 InterceptFunction(name: "_setjmp", ptr_to_real: (uptr*)&REAL(_setjmp), func: 0, trampoline: 0);
614 InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), ptr_to_real: (uptr*)&REAL(sigsetjmp_symname), func: 0,
615 trampoline: 0);
616#if !SANITIZER_NETBSD
617 InterceptFunction(name: "__sigsetjmp", ptr_to_real: (uptr*)&REAL(__sigsetjmp), func: 0, trampoline: 0);
618#endif
619}
620#endif // SANITIZER_APPLE
621
622#if SANITIZER_NETBSD
623#define longjmp_symname __longjmp14
624#define siglongjmp_symname __siglongjmp14
625#else
626#define longjmp_symname longjmp
627#define siglongjmp_symname siglongjmp
628#endif
629
630TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
631 // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
632 // bad things will happen. We will jump over ScopedInterceptor dtor and can
633 // leave thr->in_ignored_lib set.
634 {
635 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
636 }
637 LongJmp(thr: cur_thread(), env);
638 REAL(longjmp_symname)(env, val);
639}
640
641TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
642 {
643 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
644 }
645 LongJmp(thr: cur_thread(), env);
646 REAL(siglongjmp_symname)(env, val);
647}
648
649#if SANITIZER_NETBSD
650TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
651 {
652 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
653 }
654 LongJmp(cur_thread(), env);
655 REAL(_longjmp)(env, val);
656}
657#endif
658
659#if !SANITIZER_APPLE
660TSAN_INTERCEPTOR(void*, malloc, uptr size) {
661 if (in_symbolizer())
662 return InternalAlloc(size);
663 void *p = 0;
664 {
665 SCOPED_INTERCEPTOR_RAW(malloc, size);
666 p = user_alloc(thr, pc, sz: size);
667 }
668 invoke_malloc_hook(ptr: p, size);
669 return p;
670}
671
// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races and (2) free will not be
// called on libc-internal allocations.
675TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
676 SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
677 return user_memalign(thr, pc, align, sz);
678}
679
680TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
681 if (in_symbolizer())
682 return InternalCalloc(count: size, size: n);
683 void *p = 0;
684 {
685 SCOPED_INTERCEPTOR_RAW(calloc, size, n);
686 p = user_calloc(thr, pc, sz: size, n);
687 }
688 invoke_malloc_hook(ptr: p, size: n * size);
689 return p;
690}
691
692TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
693 if (in_symbolizer())
694 return InternalRealloc(p, size);
695 if (p)
696 invoke_free_hook(ptr: p);
697 {
698 SCOPED_INTERCEPTOR_RAW(realloc, p, size);
699 p = user_realloc(thr, pc, p, sz: size);
700 }
701 invoke_malloc_hook(ptr: p, size);
702 return p;
703}
704
705TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
706 if (in_symbolizer())
707 return InternalReallocArray(p, count: size, size: n);
708 if (p)
709 invoke_free_hook(ptr: p);
710 {
711 SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
712 p = user_reallocarray(thr, pc, p, sz: size, n);
713 }
714 invoke_malloc_hook(ptr: p, size);
715 return p;
716}
717
718TSAN_INTERCEPTOR(void, free, void *p) {
719 if (p == 0)
720 return;
721 if (in_symbolizer())
722 return InternalFree(p);
723 invoke_free_hook(ptr: p);
724 SCOPED_INTERCEPTOR_RAW(free, p);
725 user_free(thr, pc, p);
726}
727
728TSAN_INTERCEPTOR(void, cfree, void *p) {
729 if (p == 0)
730 return;
731 if (in_symbolizer())
732 return InternalFree(p);
733 invoke_free_hook(ptr: p);
734 SCOPED_INTERCEPTOR_RAW(cfree, p);
735 user_free(thr, pc, p);
736}
737
738TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
739 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
740 return user_alloc_usable_size(p);
741}
742#endif
743
744TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
745 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
746 uptr srclen = internal_strlen(s: src);
747 MemoryAccessRange(thr, pc, addr: (uptr)dst, size: srclen + 1, is_write: true);
748 MemoryAccessRange(thr, pc, addr: (uptr)src, size: srclen + 1, is_write: false);
749 return REAL(strcpy)(dst, src);
750}
751
752TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
753 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
754 uptr srclen = internal_strnlen(s: src, maxlen: n);
755 MemoryAccessRange(thr, pc, addr: (uptr)dst, size: n, is_write: true);
756 MemoryAccessRange(thr, pc, addr: (uptr)src, size: min(a: srclen + 1, b: n), is_write: false);
757 return REAL(strncpy)(dst, src, n);
758}
759
760TSAN_INTERCEPTOR(char*, strdup, const char *str) {
761 SCOPED_TSAN_INTERCEPTOR(strdup, str);
762 // strdup will call malloc, so no instrumentation is required here.
763 return REAL(strdup)(str);
764}
765
// Zero out addr if it points outside of application memory and was provided
// only as a hint, i.e., MAP_FIXED is not set.
768static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
769 if (*addr) {
770 if (!IsAppMem(mem: (uptr)*addr) || !IsAppMem(mem: (uptr)*addr + sz - 1)) {
771 if (flags & MAP_FIXED) {
772 errno = errno_EINVAL;
773 return false;
774 } else {
775 *addr = 0;
776 }
777 }
778 }
779 return true;
780}
781
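// Note: on success the interceptor below reports an FdAccess for file-backed
// mappings (fd > 0) and, roughly, imitates a write (or resets the shadow) over
// the whole mapped range, so that stale shadow state from a previous mapping at
// the same address cannot produce false reports.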
782template <class Mmap>
783static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
784 void *addr, SIZE_T sz, int prot, int flags,
785 int fd, OFF64_T off) {
786 if (!fix_mmap_addr(addr: &addr, sz, flags)) return MAP_FAILED;
787 void *res = real_mmap(addr, sz, prot, flags, fd, off);
788 if (res != MAP_FAILED) {
789 if (!IsAppMem(mem: (uptr)res) || !IsAppMem(mem: (uptr)res + sz - 1)) {
790 Report(format: "ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
791 addr, (void*)sz, res);
792 Die();
793 }
794 if (fd > 0) FdAccess(thr, pc, fd);
795 MemoryRangeImitateWriteOrResetRange(thr, pc, addr: (uptr)res, size: sz);
796 }
797 return res;
798}
799
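// Note: munmap_interceptor() clears the shadow/meta state for the range via
// UnmapShadow, so that a later mapping at the same address starts with clean
// state rather than inheriting stale accesses.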
800template <class Munmap>
801static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
802 void *addr, SIZE_T sz) {
803 UnmapShadow(thr, addr: (uptr)addr, size: sz);
804 int res = real_munmap(addr, sz);
805 return res;
806}
807
808#if SANITIZER_LINUX
809TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
810 SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
811 return user_memalign(thr, pc, align, sz);
812}
813#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
814#else
815#define TSAN_MAYBE_INTERCEPT_MEMALIGN
816#endif
817
818#if !SANITIZER_APPLE
819TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
820 if (in_symbolizer())
821 return InternalAlloc(size: sz, cache: nullptr, alignment: align);
822 SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
823 return user_aligned_alloc(thr, pc, align, sz);
824}
825
826TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
827 if (in_symbolizer())
828 return InternalAlloc(size: sz, cache: nullptr, alignment: GetPageSizeCached());
829 SCOPED_INTERCEPTOR_RAW(valloc, sz);
830 return user_valloc(thr, pc, sz);
831}
832#endif
833
834#if SANITIZER_LINUX
835TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
836 if (in_symbolizer()) {
837 uptr PageSize = GetPageSizeCached();
838 sz = sz ? RoundUpTo(size: sz, boundary: PageSize) : PageSize;
839 return InternalAlloc(size: sz, cache: nullptr, alignment: PageSize);
840 }
841 SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
842 return user_pvalloc(thr, pc, sz);
843}
844#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
845#else
846#define TSAN_MAYBE_INTERCEPT_PVALLOC
847#endif
848
849#if !SANITIZER_APPLE
850TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
851 if (in_symbolizer()) {
852 void *p = InternalAlloc(size: sz, cache: nullptr, alignment: align);
853 if (!p)
854 return errno_ENOMEM;
855 *memptr = p;
856 return 0;
857 }
858 SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
859 return user_posix_memalign(thr, pc, memptr, align, sz);
860}
861#endif
862
863// Both __cxa_guard_acquire and pthread_once 0-initialize
864// the object initially. pthread_once does not have any
865// other ABI requirements. __cxa_guard_acquire assumes
866// that any non-0 value in the first byte means that
867// initialization is completed. Contents of the remaining
868// bytes are up to us.
869constexpr u32 kGuardInit = 0;
870constexpr u32 kGuardDone = 1;
871constexpr u32 kGuardRunning = 1 << 16;
872constexpr u32 kGuardWaiter = 1 << 17;
873
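// Guard state machine (sketch):
//   kGuardInit --CAS--> kGuardRunning: guard_acquire() returns 1 and the caller
//     runs the initializer.
//   kGuardDone: initialization already happened; Acquire(g) and return 0.
//   otherwise (initializer running elsewhere): set the kGuardWaiter bit and
//     FutexWait until the owner releases.
// guard_release() does Release(g), swaps in the new value (kGuardDone, or back
// to kGuardInit on __cxa_guard_abort) and FutexWake()s waiters if the waiter
// bit was set.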
874static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
875 bool blocking_hooks = true) {
876 if (blocking_hooks)
877 OnPotentiallyBlockingRegionBegin();
878 auto on_exit = at_scope_exit(fn: [blocking_hooks] {
879 if (blocking_hooks)
880 OnPotentiallyBlockingRegionEnd();
881 });
882
883 for (;;) {
884 u32 cmp = atomic_load(a: g, mo: memory_order_acquire);
885 if (cmp == kGuardInit) {
886 if (atomic_compare_exchange_strong(a: g, cmp: &cmp, xchg: kGuardRunning,
887 mo: memory_order_relaxed))
888 return 1;
889 } else if (cmp == kGuardDone) {
890 if (!thr->in_ignored_lib)
891 Acquire(thr, pc, addr: (uptr)g);
892 return 0;
893 } else {
894 if ((cmp & kGuardWaiter) ||
895 atomic_compare_exchange_strong(a: g, cmp: &cmp, xchg: cmp | kGuardWaiter,
896 mo: memory_order_relaxed))
897 FutexWait(p: g, cmp: cmp | kGuardWaiter);
898 }
899 }
900}
901
902static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
903 u32 v) {
904 if (!thr->in_ignored_lib)
905 Release(thr, pc, addr: (uptr)g);
906 u32 old = atomic_exchange(a: g, v, mo: memory_order_release);
907 if (old & kGuardWaiter)
908 FutexWake(p: g, count: 1 << 30);
909}
910
// __cxa_guard_acquire and friends need to be intercepted in a special way --
// regular interceptors would break statically-linked libstdc++. On Linux,
// interceptors are intentionally defined as weak functions (so that they don't
// cause link errors when the user defines them as well), which means they
// silently disable themselves when such a symbol is already present in the
// binary. If we link libstdc++ statically, it brings its own
// __cxa_guard_acquire, which would silently replace our interceptor. That's why
// on Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
921#if SANITIZER_APPLE
922#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
923#else
924#define STDCXX_INTERCEPTOR(rettype, name, ...) \
925 extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
926#endif
927
928// Used in thread-safe function static initialization.
929STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
930 SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
931 return guard_acquire(thr, pc, g);
932}
933
934STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
935 SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
936 guard_release(thr, pc, g, v: kGuardDone);
937}
938
939STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
940 SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
941 guard_release(thr, pc, g, v: kGuardInit);
942}
943
944namespace __tsan {
945void DestroyThreadState() {
946 ThreadState *thr = cur_thread();
947 Processor *proc = thr->proc();
948 ThreadFinish(thr);
949 ProcUnwire(proc, thr);
950 ProcDestroy(proc);
951 DTLS_Destroy();
952 cur_thread_finalize();
953}
954
955void PlatformCleanUpThreadState(ThreadState *thr) {
956 ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
957 a: &thr->signal_ctx, mo: memory_order_relaxed);
958 if (sctx) {
959 atomic_store(a: &thr->signal_ctx, v: 0, mo: memory_order_relaxed);
960 UnmapOrDie(addr: sctx, size: sizeof(*sctx));
961 }
962}
963} // namespace __tsan
964
965#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
966static void thread_finalize(void *v) {
967 uptr iter = (uptr)v;
968 if (iter > 1) {
969 if (pthread_setspecific(key: interceptor_ctx()->finalize_key,
970 v: (void*)(iter - 1))) {
971 Printf(format: "ThreadSanitizer: failed to set thread key\n");
972 Die();
973 }
974 return;
975 }
976 DestroyThreadState();
977}
978#endif
979
980
981struct ThreadParam {
982 void* (*callback)(void *arg);
983 void *param;
984 Tid tid;
985 Semaphore created;
986 Semaphore started;
987};
988
989extern "C" void *__tsan_thread_start_func(void *arg) {
990 ThreadParam *p = (ThreadParam*)arg;
991 void* (*callback)(void *arg) = p->callback;
992 void *param = p->param;
993 {
994 ThreadState *thr = cur_thread_init();
995 // Thread-local state is not initialized yet.
996 ScopedIgnoreInterceptors ignore;
997#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
998 ThreadIgnoreBegin(thr, pc: 0);
999 if (pthread_setspecific(key: interceptor_ctx()->finalize_key,
1000 v: (void *)GetPthreadDestructorIterations())) {
1001 Printf(format: "ThreadSanitizer: failed to set thread key\n");
1002 Die();
1003 }
1004 ThreadIgnoreEnd(thr);
1005#endif
1006 p->created.Wait();
1007 Processor *proc = ProcCreate();
1008 ProcWire(proc, thr);
1009 ThreadStart(thr, tid: p->tid, os_id: GetTid(), thread_type: ThreadType::Regular);
1010 p->started.Post();
1011 }
1012 void *res = callback(param);
  // Prevent the callback from being tail-called;
  // that would mix up stack traces.
1015 volatile int foo = 42;
1016 foo++;
1017 return res;
1018}
1019
1020TSAN_INTERCEPTOR(int, pthread_create,
1021 void *th, void *attr, void *(*callback)(void*), void * param) {
1022 SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1023
1024 MaybeSpawnBackgroundThread();
1025
1026 if (ctx->after_multithreaded_fork) {
1027 if (flags()->die_after_fork) {
1028 Report(format: "ThreadSanitizer: starting new threads after multi-threaded "
1029 "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1030 Die();
1031 } else {
1032 VPrintf(1,
1033 "ThreadSanitizer: starting new threads after multi-threaded "
1034 "fork is not supported (pid %lu). Continuing because of "
1035 "die_after_fork=0, but you are on your own\n",
1036 internal_getpid());
1037 }
1038 }
1039 __sanitizer_pthread_attr_t myattr;
1040 if (attr == 0) {
1041 pthread_attr_init(attr: &myattr);
1042 attr = &myattr;
1043 }
1044 int detached = 0;
1045 REAL(pthread_attr_getdetachstate)(attr, &detached);
1046 AdjustStackSize(attr);
1047
1048 ThreadParam p;
1049 p.callback = callback;
1050 p.param = param;
1051 p.tid = kMainTid;
1052 int res = -1;
1053 {
1054 // Otherwise we see false positives in pthread stack manipulation.
1055 ScopedIgnoreInterceptors ignore;
1056 ThreadIgnoreBegin(thr, pc);
1057 res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1058 ThreadIgnoreEnd(thr);
1059 }
1060 if (res == 0) {
1061 p.tid = ThreadCreate(thr, pc, uid: *(uptr *)th, detached: IsStateDetached(state: detached));
1062 CHECK_NE(p.tid, kMainTid);
1063 // Synchronization on p.tid serves two purposes:
1064 // 1. ThreadCreate must finish before the new thread starts.
1065 // Otherwise the new thread can call pthread_detach, but the pthread_t
1066 // identifier is not yet registered in ThreadRegistry by ThreadCreate.
1067 // 2. ThreadStart must finish before this thread continues.
1068 // Otherwise, this thread can call pthread_detach and reset thr->sync
1069 // before the new thread got a chance to acquire from it in ThreadStart.
1070 p.created.Post();
1071 p.started.Wait();
1072 }
1073 if (attr == &myattr)
1074 pthread_attr_destroy(attr: &myattr);
1075 return res;
1076}
1077
1078TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1079 SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1080 Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
1081 ThreadIgnoreBegin(thr, pc);
1082 int res = BLOCK_REAL(pthread_join)(th, ret);
1083 ThreadIgnoreEnd(thr);
1084 if (res == 0) {
1085 ThreadJoin(thr, pc, tid);
1086 }
1087 return res;
1088}
1089
1090DEFINE_REAL_PTHREAD_FUNCTIONS
1091
1092TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1093 SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1094 Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
1095 int res = REAL(pthread_detach)(th);
1096 if (res == 0) {
1097 ThreadDetach(thr, pc, tid);
1098 }
1099 return res;
1100}
1101
1102TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1103 {
1104 SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1105#if !SANITIZER_APPLE && !SANITIZER_ANDROID
1106 CHECK_EQ(thr, &cur_thread_placeholder);
1107#endif
1108 }
1109 REAL(pthread_exit)(retval);
1110}
1111
1112#if SANITIZER_LINUX
1113TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1114 SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1115 Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
1116 ThreadIgnoreBegin(thr, pc);
1117 int res = REAL(pthread_tryjoin_np)(th, ret);
1118 ThreadIgnoreEnd(thr);
1119 if (res == 0)
1120 ThreadJoin(thr, pc, tid);
1121 else
1122 ThreadNotJoined(thr, pc, tid, uid: (uptr)th);
1123 return res;
1124}
1125
1126TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1127 const struct timespec *abstime) {
1128 SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1129 Tid tid = ThreadConsumeTid(thr, pc, uid: (uptr)th);
1130 ThreadIgnoreBegin(thr, pc);
1131 int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1132 ThreadIgnoreEnd(thr);
1133 if (res == 0)
1134 ThreadJoin(thr, pc, tid);
1135 else
1136 ThreadNotJoined(thr, pc, tid, uid: (uptr)th);
1137 return res;
1138}
1139#endif
1140
// Problem:
// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
// and pthread_cond_t has a different size in the two versions.
// If we call the new REAL functions on an old pthread_cond_t, they will corrupt
// memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions on a new pthread_cond_t, we lose some
// functionality (e.g. the old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of the interceptors as well,
// but that is messy and in particular requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume there are no dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support some
// features, e.g. pthread_condattr_getpshared).
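// Roughly, in legacy_pthread_cond mode the first word of the user's
// pthread_cond_t is (lazily, via CAS) turned into a pointer to a heap-allocated
// buffer of pthread_cond_t_sz bytes, and that buffer is what gets passed to the
// REAL pthread_cond_* functions; pthread_cond_destroy frees it again.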
1156static void *init_cond(void *c, bool force = false) {
1157 // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1158 // So we allocate additional memory on the side large enough to hold
1159 // any pthread_cond_t object. Always call new REAL functions, but pass
1160 // the aux object to them.
1161 // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1162 // first word of pthread_cond_t to zero.
1163 // It's all relevant only for linux.
1164 if (!common_flags()->legacy_pthread_cond)
1165 return c;
1166 atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1167 uptr cond = atomic_load(a: p, mo: memory_order_acquire);
1168 if (!force && cond != 0)
1169 return (void*)cond;
1170 void *newcond = WRAP(malloc)(size: pthread_cond_t_sz);
1171 internal_memset(s: newcond, c: 0, n: pthread_cond_t_sz);
1172 if (atomic_compare_exchange_strong(a: p, cmp: &cond, xchg: (uptr)newcond,
1173 mo: memory_order_acq_rel))
1174 return newcond;
1175 WRAP(free)(p: newcond);
1176 return (void*)cond;
1177}
1178
1179namespace {
1180
1181template <class Fn>
1182struct CondMutexUnlockCtx {
1183 ScopedInterceptor *si;
1184 ThreadState *thr;
1185 uptr pc;
1186 void *m;
1187 void *c;
1188 const Fn &fn;
1189
1190 int Cancel() const { return fn(); }
1191 void Unlock() const;
1192};
1193
1194template <class Fn>
1195void CondMutexUnlockCtx<Fn>::Unlock() const {
1196 // pthread_cond_wait interceptor has enabled async signal delivery
1197 // (see BlockingCall below). Disable async signals since we are running
1198 // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1199 // since the thread is cancelled, so we have to manually execute them
1200 // (the thread still can run some user code due to pthread_cleanup_push).
1201 CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1202 atomic_store(a: &thr->in_blocking_func, v: 0, mo: memory_order_relaxed);
1203 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagDoPreLockOnPostLock);
1204 // Undo BlockingCall ctor effects.
1205 thr->ignore_interceptors--;
1206 si->~ScopedInterceptor();
1207}
1208} // namespace
1209
1210INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1211 void *cond = init_cond(c, force: true);
1212 SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1213 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: true);
1214 return REAL(pthread_cond_init)(cond, a);
1215}
1216
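// cond_wait() is the shared implementation of the pthread_cond_*wait
// interceptors: it annotates MutexUnlock before the real wait, runs the real
// wait as a cancellation-safe BlockingCall, and annotates MutexPostLock once it
// returns. If the thread is cancelled while waiting, the cleanup callback
// (CondMutexUnlockCtx::Unlock) performs the same post-lock annotation and
// manually undoes the BlockingCall/ScopedInterceptor effects, since their
// destructors will not run for a cancelled thread.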
1217template <class Fn>
1218int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1219 void *c, void *m) {
1220 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
1221 MutexUnlock(thr, pc, addr: (uptr)m);
1222 int res = 0;
1223 // This ensures that we handle mutex lock even in case of pthread_cancel.
1224 // See test/tsan/cond_cancel.cpp.
1225 {
1226 // Enable signal delivery while the thread is blocked.
1227 BlockingCall bc(thr);
1228 CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1229 res = call_pthread_cancel_with_cleanup(
1230 [](void *arg) -> int {
1231 return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1232 },
1233 [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1234 &arg);
1235 }
1236 if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, addr: (uptr)m);
1237 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagDoPreLockOnPostLock);
1238 return res;
1239}
1240
1241INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1242 void *cond = init_cond(c);
1243 SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1244 return cond_wait(
1245 thr, pc, si: &si, fn: [=]() { return REAL(pthread_cond_wait)(cond, m); }, c: cond,
1246 m);
1247}
1248
1249INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1250 void *cond = init_cond(c);
1251 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1252 return cond_wait(
1253 thr, pc, si: &si,
1254 fn: [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, c: cond,
1255 m);
1256}
1257
1258#if SANITIZER_LINUX
1259INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1260 __sanitizer_clockid_t clock, void *abstime) {
1261 void *cond = init_cond(c);
1262 SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1263 return cond_wait(
1264 thr, pc, si: &si,
1265 fn: [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1266 c: cond, m);
1267}
1268#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1269#else
1270#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1271#endif
1272
1273#if SANITIZER_APPLE
1274INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1275 void *reltime) {
1276 void *cond = init_cond(c);
1277 SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1278 return cond_wait(
1279 thr, pc, &si,
1280 [=]() {
1281 return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1282 },
1283 cond, m);
1284}
1285#endif
1286
1287INTERCEPTOR(int, pthread_cond_signal, void *c) {
1288 void *cond = init_cond(c);
1289 SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1290 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
1291 return REAL(pthread_cond_signal)(cond);
1292}
1293
1294INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1295 void *cond = init_cond(c);
1296 SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1297 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: false);
1298 return REAL(pthread_cond_broadcast)(cond);
1299}
1300
1301INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1302 void *cond = init_cond(c);
1303 SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1304 MemoryAccessRange(thr, pc, addr: (uptr)c, size: sizeof(uptr), is_write: true);
1305 int res = REAL(pthread_cond_destroy)(cond);
1306 if (common_flags()->legacy_pthread_cond) {
1307 // Free our aux cond and zero the pointer to not leave dangling pointers.
1308 WRAP(free)(p: cond);
1309 atomic_store(a: (atomic_uintptr_t*)c, v: 0, mo: memory_order_relaxed);
1310 }
1311 return res;
1312}
1313
1314TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1315 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1316 int res = REAL(pthread_mutex_init)(m, a);
1317 if (res == 0) {
1318 u32 flagz = 0;
1319 if (a) {
1320 int type = 0;
1321 if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1322 if (type == PTHREAD_MUTEX_RECURSIVE ||
1323 type == PTHREAD_MUTEX_RECURSIVE_NP)
1324 flagz |= MutexFlagWriteReentrant;
1325 }
1326 MutexCreate(thr, pc, addr: (uptr)m, flagz);
1327 }
1328 return res;
1329}
1330
1331TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1332 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1333 int res = REAL(pthread_mutex_destroy)(m);
1334 if (res == 0 || res == errno_EBUSY) {
1335 MutexDestroy(thr, pc, addr: (uptr)m);
1336 }
1337 return res;
1338}
1339
1340TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1341 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1342 MutexPreLock(thr, pc, addr: (uptr)m);
1343 int res = REAL(pthread_mutex_lock)(m);
1344 if (res == errno_EOWNERDEAD)
1345 MutexRepair(thr, pc, addr: (uptr)m);
1346 if (res == 0 || res == errno_EOWNERDEAD)
1347 MutexPostLock(thr, pc, addr: (uptr)m);
1348 if (res == errno_EINVAL)
1349 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1350 return res;
1351}
1352
1353TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1354 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1355 int res = REAL(pthread_mutex_trylock)(m);
1356 if (res == errno_EOWNERDEAD)
1357 MutexRepair(thr, pc, addr: (uptr)m);
1358 if (res == 0 || res == errno_EOWNERDEAD)
1359 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1360 return res;
1361}
1362
1363#if !SANITIZER_APPLE
1364TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1365 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1366 int res = REAL(pthread_mutex_timedlock)(m, abstime);
1367 if (res == 0) {
1368 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1369 }
1370 return res;
1371}
1372#endif
1373
1374TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1375 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1376 MutexUnlock(thr, pc, addr: (uptr)m);
1377 int res = REAL(pthread_mutex_unlock)(m);
1378 if (res == errno_EINVAL)
1379 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1380 return res;
1381}
1382
1383#if SANITIZER_LINUX
1384TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
1385 __sanitizer_clockid_t clock, void *abstime) {
1386 SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
1387 MutexPreLock(thr, pc, addr: (uptr)m);
1388 int res = REAL(pthread_mutex_clocklock)(m, clock, abstime);
1389 if (res == errno_EOWNERDEAD)
1390 MutexRepair(thr, pc, addr: (uptr)m);
1391 if (res == 0 || res == errno_EOWNERDEAD)
1392 MutexPostLock(thr, pc, addr: (uptr)m);
1393 if (res == errno_EINVAL)
1394 MutexInvalidAccess(thr, pc, addr: (uptr)m);
1395 return res;
1396}
1397#endif
1398
1399#if SANITIZER_GLIBC
1400# if !__GLIBC_PREREQ(2, 34)
1401// glibc 2.34 applies a non-default version for the two functions. They are no
1402// longer expected to be intercepted by programs.
1403TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1404 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1405 MutexPreLock(thr, pc, (uptr)m);
1406 int res = REAL(__pthread_mutex_lock)(m);
1407 if (res == errno_EOWNERDEAD)
1408 MutexRepair(thr, pc, (uptr)m);
1409 if (res == 0 || res == errno_EOWNERDEAD)
1410 MutexPostLock(thr, pc, (uptr)m);
1411 if (res == errno_EINVAL)
1412 MutexInvalidAccess(thr, pc, (uptr)m);
1413 return res;
1414}
1415
1416TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1417 SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1418 MutexUnlock(thr, pc, (uptr)m);
1419 int res = REAL(__pthread_mutex_unlock)(m);
1420 if (res == errno_EINVAL)
1421 MutexInvalidAccess(thr, pc, (uptr)m);
1422 return res;
1423}
1424# endif
1425#endif
1426
1427#if !SANITIZER_APPLE
1428TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1429 SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1430 int res = REAL(pthread_spin_init)(m, pshared);
1431 if (res == 0) {
1432 MutexCreate(thr, pc, addr: (uptr)m);
1433 }
1434 return res;
1435}
1436
1437TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1438 SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1439 int res = REAL(pthread_spin_destroy)(m);
1440 if (res == 0) {
1441 MutexDestroy(thr, pc, addr: (uptr)m);
1442 }
1443 return res;
1444}
1445
1446TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1447 SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1448 MutexPreLock(thr, pc, addr: (uptr)m);
1449 int res = REAL(pthread_spin_lock)(m);
1450 if (res == 0) {
1451 MutexPostLock(thr, pc, addr: (uptr)m);
1452 }
1453 return res;
1454}
1455
1456TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1457 SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1458 int res = REAL(pthread_spin_trylock)(m);
1459 if (res == 0) {
1460 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1461 }
1462 return res;
1463}
1464
1465TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1466 SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1467 MutexUnlock(thr, pc, addr: (uptr)m);
1468 int res = REAL(pthread_spin_unlock)(m);
1469 return res;
1470}
1471#endif
1472
1473TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1474 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1475 int res = REAL(pthread_rwlock_init)(m, a);
1476 if (res == 0) {
1477 MutexCreate(thr, pc, addr: (uptr)m);
1478 }
1479 return res;
1480}
1481
1482TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1483 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1484 int res = REAL(pthread_rwlock_destroy)(m);
1485 if (res == 0) {
1486 MutexDestroy(thr, pc, addr: (uptr)m);
1487 }
1488 return res;
1489}
1490
1491TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1492 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1493 MutexPreReadLock(thr, pc, addr: (uptr)m);
1494 int res = REAL(pthread_rwlock_rdlock)(m);
1495 if (res == 0) {
1496 MutexPostReadLock(thr, pc, addr: (uptr)m);
1497 }
1498 return res;
1499}
1500
1501TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1502 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1503 int res = REAL(pthread_rwlock_tryrdlock)(m);
1504 if (res == 0) {
1505 MutexPostReadLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1506 }
1507 return res;
1508}
1509
1510#if !SANITIZER_APPLE
1511TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1512 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1513 int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1514 if (res == 0) {
1515 MutexPostReadLock(thr, pc, addr: (uptr)m);
1516 }
1517 return res;
1518}
1519#endif
1520
1521TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1522 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1523 MutexPreLock(thr, pc, addr: (uptr)m);
1524 int res = REAL(pthread_rwlock_wrlock)(m);
1525 if (res == 0) {
1526 MutexPostLock(thr, pc, addr: (uptr)m);
1527 }
1528 return res;
1529}
1530
1531TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1532 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1533 int res = REAL(pthread_rwlock_trywrlock)(m);
1534 if (res == 0) {
1535 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1536 }
1537 return res;
1538}
1539
1540#if !SANITIZER_APPLE
1541TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1542 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1543 int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1544 if (res == 0) {
1545 MutexPostLock(thr, pc, addr: (uptr)m, flagz: MutexFlagTryLock);
1546 }
1547 return res;
1548}
1549#endif
1550
1551TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1552 SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1553 MutexReadOrWriteUnlock(thr, pc, addr: (uptr)m);
1554 int res = REAL(pthread_rwlock_unlock)(m);
1555 return res;
1556}
1557
1558#if !SANITIZER_APPLE
1559TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1560 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1561 MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessWrite);
1562 int res = REAL(pthread_barrier_init)(b, a, count);
1563 return res;
1564}
1565
1566TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1567 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1568 MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessWrite);
1569 int res = REAL(pthread_barrier_destroy)(b);
1570 return res;
1571}
1572
1573TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1574 SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1575 Release(thr, pc, addr: (uptr)b);
1576 MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessRead);
1577 int res = REAL(pthread_barrier_wait)(b);
1578 MemoryAccess(thr, pc, addr: (uptr)b, size: 1, typ: kAccessRead);
1579 if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1580 Acquire(thr, pc, addr: (uptr)b);
1581 }
1582 return res;
1583}
1584#endif
1585
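// pthread_once() reuses the guard_acquire/guard_release machinery above. The
// location of the u32 guard word inside pthread_once_t differs per platform
// (see the casts below), mirroring each libc's layout of the once control.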
1586TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1587 SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1588 if (o == 0 || f == 0)
1589 return errno_EINVAL;
1590 atomic_uint32_t *a;
1591
1592 if (SANITIZER_APPLE)
1593 a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1594 else if (SANITIZER_NETBSD)
1595 a = static_cast<atomic_uint32_t*>
1596 ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1597 else
1598 a = static_cast<atomic_uint32_t*>(o);
1599
  // Mac OS X appears to use pthread_once() in contexts where calling the
  // BlockingRegion hooks results in crashes due to too little stack space.
1602 if (guard_acquire(thr, pc, g: a, blocking_hooks: !SANITIZER_APPLE)) {
1603 (*f)();
1604 guard_release(thr, pc, g: a, v: kGuardDone);
1605 }
1606 return 0;
1607}
1608
1609#if SANITIZER_GLIBC
1610TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1611 SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1612 if (fd > 0)
1613 FdAccess(thr, pc, fd);
1614 return REAL(__fxstat)(version, fd, buf);
1615}
1616#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1617#else
1618#define TSAN_MAYBE_INTERCEPT___FXSTAT
1619#endif
1620
1621TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1622#if SANITIZER_GLIBC
1623 SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1624 if (fd > 0)
1625 FdAccess(thr, pc, fd);
1626 return REAL(__fxstat)(0, fd, buf);
1627#else
1628 SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1629 if (fd > 0)
1630 FdAccess(thr, pc, fd);
1631 return REAL(fstat)(fd, buf);
1632#endif
1633}
1634
1635#if SANITIZER_GLIBC
1636TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1637 SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1638 if (fd > 0)
1639 FdAccess(thr, pc, fd);
1640 return REAL(__fxstat64)(version, fd, buf);
1641}
1642#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1643#else
1644#define TSAN_MAYBE_INTERCEPT___FXSTAT64
1645#endif
1646
1647#if SANITIZER_GLIBC
1648TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1649 SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1650 if (fd > 0)
1651 FdAccess(thr, pc, fd);
1652 return REAL(__fxstat64)(0, fd, buf);
1653}
1654#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1655#else
1656#define TSAN_MAYBE_INTERCEPT_FSTAT64
1657#endif
1658
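// The descriptor interceptors below (open/creat/dup/pipe/socket/epoll/...)
// notify the runtime about creation, duplication and closing of fds. The Fd*
// calls attach a sync object to each descriptor so that later FdAcquire/
// FdRelease calls can model happens-before edges the program establishes
// through the descriptor (e.g. via a pipe, a socket, or epoll).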
1659TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1660 va_list ap;
1661 va_start(ap, oflag);
1662 mode_t mode = va_arg(ap, int);
1663 va_end(ap);
1664 SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1665 READ_STRING(thr, pc, name, 0);
1666 int fd = REAL(open)(name, oflag, mode);
1667 if (fd >= 0)
1668 FdFileCreate(thr, pc, fd);
1669 return fd;
1670}
1671
1672#if SANITIZER_LINUX
1673TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1674 va_list ap;
1675 va_start(ap, oflag);
1676 mode_t mode = va_arg(ap, int);
1677 va_end(ap);
1678 SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1679 READ_STRING(thr, pc, name, 0);
1680 int fd = REAL(open64)(name, oflag, mode);
1681 if (fd >= 0)
1682 FdFileCreate(thr, pc, fd);
1683 return fd;
1684}
1685#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1686#else
1687#define TSAN_MAYBE_INTERCEPT_OPEN64
1688#endif
1689
1690TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1691 SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1692 READ_STRING(thr, pc, name, 0);
1693 int fd = REAL(creat)(name, mode);
1694 if (fd >= 0)
1695 FdFileCreate(thr, pc, fd);
1696 return fd;
1697}
1698
1699#if SANITIZER_LINUX
1700TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1701 SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1702 READ_STRING(thr, pc, name, 0);
1703 int fd = REAL(creat64)(name, mode);
1704 if (fd >= 0)
1705 FdFileCreate(thr, pc, fd);
1706 return fd;
1707}
1708#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1709#else
1710#define TSAN_MAYBE_INTERCEPT_CREAT64
1711#endif
1712
1713TSAN_INTERCEPTOR(int, dup, int oldfd) {
1714 SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1715 int newfd = REAL(dup)(oldfd);
1716 if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1717 FdDup(thr, pc, oldfd, newfd, true);
1718 return newfd;
1719}
1720
1721TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1722 SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1723 int newfd2 = REAL(dup2)(oldfd, newfd);
1724 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1725 FdDup(thr, pc, oldfd, newfd2, false);
1726 return newfd2;
1727}
1728
1729#if !SANITIZER_APPLE
1730TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1731 SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1732 int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1733 if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1734 FdDup(thr, pc, oldfd, newfd2, false);
1735 return newfd2;
1736}
1737#endif
1738
1739#if SANITIZER_LINUX
1740TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1741 SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1742 int fd = REAL(eventfd)(initval, flags);
1743 if (fd >= 0)
1744 FdEventCreate(thr, pc, fd);
1745 return fd;
1746}
1747#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1748#else
1749#define TSAN_MAYBE_INTERCEPT_EVENTFD
1750#endif
1751
1752#if SANITIZER_LINUX
1753TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1754 SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1755 FdClose(thr, pc, fd);
1756 fd = REAL(signalfd)(fd, mask, flags);
1757 if (!MustIgnoreInterceptor(thr))
1758 FdSignalCreate(thr, pc, fd);
1759 return fd;
1760}
1761#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1762#else
1763#define TSAN_MAYBE_INTERCEPT_SIGNALFD
1764#endif
1765
1766#if SANITIZER_LINUX
1767TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1768 SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1769 int fd = REAL(inotify_init)(fake);
1770 if (fd >= 0)
1771 FdInotifyCreate(thr, pc, fd);
1772 return fd;
1773}
1774#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1775#else
1776#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1777#endif
1778
1779#if SANITIZER_LINUX
1780TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1781 SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1782 int fd = REAL(inotify_init1)(flags);
1783 if (fd >= 0)
1784 FdInotifyCreate(thr, pc, fd);
1785 return fd;
1786}
1787#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1788#else
1789#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1790#endif
1791
1792TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1793 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1794 int fd = REAL(socket)(domain, type, protocol);
1795 if (fd >= 0)
1796 FdSocketCreate(thr, pc, fd);
1797 return fd;
1798}
1799
1800TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1801 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1802 int res = REAL(socketpair)(domain, type, protocol, fd);
1803 if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1804 FdPipeCreate(thr, pc, fd[0], fd[1]);
1805 return res;
1806}
1807
1808TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1809 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1810 FdSocketConnecting(thr, pc, fd);
1811 int res = REAL(connect)(fd, addr, addrlen);
1812 if (res == 0 && fd >= 0)
1813 FdSocketConnect(thr, pc, fd);
1814 return res;
1815}
1816
1817TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1818 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1819 int res = REAL(bind)(fd, addr, addrlen);
1820 if (fd > 0 && res == 0)
1821 FdAccess(thr, pc, fd);
1822 return res;
1823}
1824
1825TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1826 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1827 int res = REAL(listen)(fd, backlog);
1828 if (fd > 0 && res == 0)
1829 FdAccess(thr, pc, fd);
1830 return res;
1831}
1832
1833TSAN_INTERCEPTOR(int, close, int fd) {
1834 SCOPED_INTERCEPTOR_RAW(close, fd);
1835 if (!in_symbolizer())
1836 FdClose(thr, pc, fd);
1837 return REAL(close)(fd);
1838}
1839
1840#if SANITIZER_LINUX
1841TSAN_INTERCEPTOR(int, __close, int fd) {
1842 SCOPED_INTERCEPTOR_RAW(__close, fd);
1843 FdClose(thr, pc, fd);
1844 return REAL(__close)(fd);
1845}
1846#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1847#else
1848#define TSAN_MAYBE_INTERCEPT___CLOSE
1849#endif
1850
1851// glibc guts
1852#if SANITIZER_LINUX && !SANITIZER_ANDROID
1853TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1854 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1855 int fds[64];
1856 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1857 for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1858 REAL(__res_iclose)(state, free_addr);
1859}
1860#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1861#else
1862#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1863#endif
1864
1865TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1866 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1867 int res = REAL(pipe)(pipefd);
1868 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1869 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1870 return res;
1871}
1872
1873#if !SANITIZER_APPLE
1874TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1875 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1876 int res = REAL(pipe2)(pipefd, flags);
1877 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1878 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1879 return res;
1880}
1881#endif
1882
1883TSAN_INTERCEPTOR(int, unlink, char *path) {
1884 SCOPED_TSAN_INTERCEPTOR(unlink, path);
1885 Release(thr, pc, File2addr(path));
1886 int res = REAL(unlink)(path);
1887 return res;
1888}
1889
1890TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1891 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1892 void *res = REAL(tmpfile)(fake);
1893 if (res) {
1894 int fd = fileno_unlocked(res);
1895 if (fd >= 0)
1896 FdFileCreate(thr, pc, fd);
1897 }
1898 return res;
1899}
1900
1901#if SANITIZER_LINUX
1902TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1903 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1904 void *res = REAL(tmpfile64)(fake);
1905 if (res) {
1906 int fd = fileno_unlocked(res);
1907 if (fd >= 0)
1908 FdFileCreate(thr, pc, fd);
1909 }
1910 return res;
1911}
1912#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1913#else
1914#define TSAN_MAYBE_INTERCEPT_TMPFILE64
1915#endif
1916
1917static void FlushStreams() {
1918 // Flushing all the streams here may freeze the process if a child thread is
1919 // performing file stream operations at the same time.
1920 REAL(fflush)(stdout);
1921 REAL(fflush)(stderr);
1922}
1923
1924TSAN_INTERCEPTOR(void, abort, int fake) {
1925 SCOPED_TSAN_INTERCEPTOR(abort, fake);
1926 FlushStreams();
1927 REAL(abort)(fake);
1928}
1929
1930TSAN_INTERCEPTOR(int, rmdir, char *path) {
1931 SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1932 Release(thr, pc, Dir2addr(path));
1933 int res = REAL(rmdir)(path);
1934 return res;
1935}
1936
1937TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1938 SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1939 if (dirp) {
1940 int fd = dirfd(dirp);
1941 FdClose(thr, pc, fd);
1942 }
1943 return REAL(closedir)(dirp);
1944}
1945
1946#if SANITIZER_LINUX
1947TSAN_INTERCEPTOR(int, epoll_create, int size) {
1948 SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1949 int fd = REAL(epoll_create)(size);
1950 if (fd >= 0)
1951 FdPollCreate(thr, pc, fd);
1952 return fd;
1953}
1954
1955TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1956 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1957 int fd = REAL(epoll_create1)(flags);
1958 if (fd >= 0)
1959 FdPollCreate(thr, pc, fd);
1960 return fd;
1961}
1962
1963TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1964 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1965 if (epfd >= 0)
1966 FdAccess(thr, pc, epfd);
1967 if (epfd >= 0 && fd >= 0)
1968 FdAccess(thr, pc, fd);
1969 if (op == EPOLL_CTL_ADD && epfd >= 0) {
1970 FdPollAdd(thr, pc, epfd, fd);
1971 FdRelease(thr, pc, epfd);
1972 }
1973 int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1974 return res;
1975}
1976
1977TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1978 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1979 if (epfd >= 0)
1980 FdAccess(thr, pc, epfd);
1981 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1982 if (res > 0 && epfd >= 0)
1983 FdAcquire(thr, pc, epfd);
1984 return res;
1985}
1986
1987TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1988 void *sigmask) {
1989 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1990 if (epfd >= 0)
1991 FdAccess(thr, pc, epfd);
1992 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1993 if (res > 0 && epfd >= 0)
1994 FdAcquire(thr, pc, epfd);
1995 return res;
1996}
1997
1998TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
1999 void *sigmask) {
2000 SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
2001 // This function is new and may not be present in libc and/or kernel.
2002 // Since we effectively add it to libc (as will be probed by the program
2003 // using dlsym or a weak function pointer) we need to handle the case
2004 // when it's not present in the actual libc.
2005 if (!REAL(epoll_pwait2)) {
2006 errno = errno_ENOSYS;
2007 return -1;
2008 }
2009 if (MustIgnoreInterceptor(thr))
2010 return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2011 if (epfd >= 0)
2012 FdAccess(thr, pc, epfd);
2013 int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2014 if (res > 0 && epfd >= 0)
2015 FdAcquire(thr, pc, epfd);
2016 return res;
2017}
2018
2019# define TSAN_MAYBE_INTERCEPT_EPOLL \
2020 TSAN_INTERCEPT(epoll_create); \
2021 TSAN_INTERCEPT(epoll_create1); \
2022 TSAN_INTERCEPT(epoll_ctl); \
2023 TSAN_INTERCEPT(epoll_wait); \
2024 TSAN_INTERCEPT(epoll_pwait); \
2025 TSAN_INTERCEPT(epoll_pwait2)
2026#else
2027#define TSAN_MAYBE_INTERCEPT_EPOLL
2028#endif
2029
2030// The following functions are intercepted merely to process pending signals.
2031 // If the program blocks signal X, we must deliver the signal before the function
2032 // returns. Similarly, if the program unblocks a signal (or returns from sigsuspend)
2033// it's better to deliver the signal straight away.
2034TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2035 SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2036 return REAL(sigsuspend)(mask);
2037}
2038
2039TSAN_INTERCEPTOR(int, sigblock, int mask) {
2040 SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2041 return REAL(sigblock)(mask);
2042}
2043
2044TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2045 SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2046 return REAL(sigsetmask)(mask);
2047}
2048
2049TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2050 __sanitizer_sigset_t *oldset) {
2051 SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2052 return REAL(pthread_sigmask)(how, set, oldset);
2053}
2054
2055namespace __tsan {
2056
2057static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2058 VarSizeStackTrace stack;
2059 // StackTrace::GetNextInstructionPc(pc) is used because a return address is
2060 // expected; OutputReport() will undo this.
2061 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2062 ThreadRegistryLock l(&ctx->thread_registry);
2063 ScopedReport rep(ReportTypeErrnoInSignal);
2064 rep.SetSigNum(sig);
2065 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2066 rep.AddStack(stack, true);
2067 OutputReport(thr, rep);
2068 }
2069}
2070
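// Invokes the user signal handler and checks that it does not spoil errno.
// The check works by setting errno to a sentinel value (99) before the call;
// if the handler returns with errno different from the sentinel, the handler
// changed errno without restoring it and ReportTypeErrnoInSignal is reported.
// The thread's original errno is restored afterwards in either case.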
2071static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2072 int sig, __sanitizer_siginfo *info,
2073 void *uctx) {
2074 CHECK(thr->slot);
2075 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2076 if (acquire)
2077 Acquire(thr, 0, (uptr)&sigactions[sig]);
2078 // Signals are generally asynchronous, so if we receive a signal when
2079 // ignores are enabled we should disable ignores. This is critical for sync
2080 // and interceptors, because otherwise we can miss synchronization and report
2081 // false races.
2082 int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2083 int ignore_interceptors = thr->ignore_interceptors;
2084 int ignore_sync = thr->ignore_sync;
2085 // For symbolizer we only process SIGSEGVs synchronously
2086 // (bug in symbolizer or in tsan). But we want to reset
2087 // in_symbolizer to fail gracefully. Symbolizer and user code
2088 // use different memory allocators, so if we don't reset
2089 // in_symbolizer we can get memory allocated with one being
2090 // freed with another, which can cause more crashes.
2091 int in_symbolizer = thr->in_symbolizer;
2092 if (!ctx->after_multithreaded_fork) {
2093 thr->ignore_reads_and_writes = 0;
2094 thr->fast_state.ClearIgnoreBit();
2095 thr->ignore_interceptors = 0;
2096 thr->ignore_sync = 0;
2097 thr->in_symbolizer = 0;
2098 }
2099 // Ensure that the handler does not spoil errno.
2100 const int saved_errno = errno;
2101 errno = 99;
2102 // This code races with sigaction. Be careful to not read sa_sigaction twice.
2103 // Also need to remember pc for reporting before the call,
2104 // because the handler can reset it.
2105 volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2106 ? (uptr)sigactions[sig].sigaction
2107 : (uptr)sigactions[sig].handler;
2108 if (pc != sig_dfl && pc != sig_ign) {
2109 // The callback can be either sa_handler or sa_sigaction.
2110 // They have different signatures, but we assume that passing
2111 // additional arguments to sa_handler works and is harmless.
2112 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2113 }
2114 if (!ctx->after_multithreaded_fork) {
2115 thr->ignore_reads_and_writes = ignore_reads_and_writes;
2116 if (ignore_reads_and_writes)
2117 thr->fast_state.SetIgnoreBit();
2118 thr->ignore_interceptors = ignore_interceptors;
2119 thr->ignore_sync = ignore_sync;
2120 thr->in_symbolizer = in_symbolizer;
2121 }
2122 // We do not detect errno spoiling for SIGTERM,
2123 // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2124 // and tsan would report a false positive in such cases.
2125 // It's difficult to properly detect this situation (reraise),
2126 // because in async signal processing case (when handler is called directly
2127 // from rtl_generic_sighandler) we have not yet received the reraised
2128 // signal; and it looks too fragile to intercept all ways to reraise a signal.
2129 if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2130 errno != 99)
2131 ReportErrnoSpoiling(thr, pc, sig);
2132 errno = saved_errno;
2133}
2134
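// Delivers every signal queued by sighandler(). The queued handlers run with
// all signals blocked (note that sctx->emptyset is actually filled before the
// SIG_SETMASK call), and the thread's original signal mask is restored once
// the loop finishes.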
2135void ProcessPendingSignalsImpl(ThreadState *thr) {
2136 atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2137 ThreadSignalContext *sctx = SigCtx(thr);
2138 if (sctx == 0)
2139 return;
2140 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2141 internal_sigfillset(&sctx->emptyset);
2142 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2143 CHECK_EQ(res, 0);
2144 for (int sig = 0; sig < kSigCount; sig++) {
2145 SignalDesc *signal = &sctx->pending_signals[sig];
2146 if (signal->armed) {
2147 signal->armed = false;
2148 CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2149 &signal->ctx);
2150 }
2151 }
2152 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2153 CHECK_EQ(res, 0);
2154 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2155}
2156
2157} // namespace __tsan
2158
2159static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2160 __sanitizer_siginfo *info) {
2161 // If we are sending signal to ourselves, we must process it now.
2162 if (sctx && sig == sctx->int_signal_send)
2163 return true;
2164#if SANITIZER_HAS_SIGINFO
2165 // POSIX timers can be configured to send any kind of signal; however, it
2166 // doesn't make any sense to consider a timer signal as synchronous!
2167 if (info->si_code == SI_TIMER)
2168 return false;
2169#endif
2170 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2171 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2172}
2173
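// Central signal entry point registered via sigaction_impl(). Synchronous
// signals (and signals arriving while the thread is parked in a blocking
// interceptor) are delivered to the user handler immediately; all other
// signals are merely recorded in pending_signals here and delivered later
// from ProcessPendingSignalsImpl() at a safe point.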
2174void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2175 ThreadState *thr = cur_thread_init();
2176 ThreadSignalContext *sctx = SigCtx(thr);
2177 if (sig < 0 || sig >= kSigCount) {
2178 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2179 return;
2180 }
2181 // Don't mess with synchronous signals.
2182 const bool sync = is_sync_signal(sctx, sig, info);
2183 if (sync ||
2184 // If we are in blocking function, we can safely process it now
2185 // (but check if we are in a recursive interceptor,
2186 // i.e. pthread_join()->munmap()).
2187 atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2188 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2189 if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2190 atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2191 CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2192 atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2193 } else {
2194 // Be very conservative with when we do acquire in this case.
2195 // It's unsafe to do acquire in async handlers, because ThreadState
2196 // can be in inconsistent state.
2197 // SIGSYS looks relatively safe -- it's synchronous and can actually
2198 // need some global state.
2199 bool acq = (sig == SIGSYS);
2200 CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2201 }
2202 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2203 return;
2204 }
2205
2206 if (sctx == 0)
2207 return;
2208 SignalDesc *signal = &sctx->pending_signals[sig];
2209 if (signal->armed == false) {
2210 signal->armed = true;
2211 internal_memcpy(&signal->siginfo, info, sizeof(*info));
2212 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2213 atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2214 }
2215}
2216
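// raise/kill/pthread_kill record the signal number in int_signal_send while
// the real call is in flight, so that is_sync_signal() treats a signal sent
// to the current thread as synchronous and sighandler() runs the user handler
// right away instead of queueing it.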
2217TSAN_INTERCEPTOR(int, raise, int sig) {
2218 SCOPED_TSAN_INTERCEPTOR(raise, sig);
2219 ThreadSignalContext *sctx = SigCtx(thr);
2220 CHECK_NE(sctx, 0);
2221 int prev = sctx->int_signal_send;
2222 sctx->int_signal_send = sig;
2223 int res = REAL(raise)(sig);
2224 CHECK_EQ(sctx->int_signal_send, sig);
2225 sctx->int_signal_send = prev;
2226 return res;
2227}
2228
2229TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2230 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2231 ThreadSignalContext *sctx = SigCtx(thr);
2232 CHECK_NE(sctx, 0);
2233 int prev = sctx->int_signal_send;
2234 if (pid == (int)internal_getpid()) {
2235 sctx->int_signal_send = sig;
2236 }
2237 int res = REAL(kill)(pid, sig);
2238 if (pid == (int)internal_getpid()) {
2239 CHECK_EQ(sctx->int_signal_send, sig);
2240 sctx->int_signal_send = prev;
2241 }
2242 return res;
2243}
2244
2245TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2246 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2247 ThreadSignalContext *sctx = SigCtx(thr);
2248 CHECK_NE(sctx, 0);
2249 int prev = sctx->int_signal_send;
2250 bool self = pthread_equal(tid, pthread_self());
2251 if (self)
2252 sctx->int_signal_send = sig;
2253 int res = REAL(pthread_kill)(tid, sig);
2254 if (self) {
2255 CHECK_EQ(sctx->int_signal_send, sig);
2256 sctx->int_signal_send = prev;
2257 }
2258 return res;
2259}
2260
2261TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2262 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2263 // It's intercepted merely to process pending signals.
2264 return REAL(gettimeofday)(tv, tz);
2265}
2266
2267TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2268 void *hints, void *rv) {
2269 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2270 // We miss atomic synchronization in getaddrinfo,
2271 // and can report false race between malloc and free
2272 // inside of getaddrinfo. So ignore memory accesses.
2273 ThreadIgnoreBegin(thr, pc);
2274 int res = REAL(getaddrinfo)(node, service, hints, rv);
2275 ThreadIgnoreEnd(thr);
2276 return res;
2277}
2278
2279TSAN_INTERCEPTOR(int, fork, int fake) {
2280 if (in_symbolizer())
2281 return REAL(fork)(fake);
2282 SCOPED_INTERCEPTOR_RAW(fork, fake);
2283 return REAL(fork)(fake);
2284}
2285
2286void atfork_prepare() {
2287 if (in_symbolizer())
2288 return;
2289 ThreadState *thr = cur_thread();
2290 const uptr pc = StackTrace::GetCurrentPc();
2291 ForkBefore(thr, pc);
2292}
2293
2294void atfork_parent() {
2295 if (in_symbolizer())
2296 return;
2297 ThreadState *thr = cur_thread();
2298 const uptr pc = StackTrace::GetCurrentPc();
2299 ForkParentAfter(thr, pc);
2300}
2301
2302void atfork_child() {
2303 if (in_symbolizer())
2304 return;
2305 ThreadState *thr = cur_thread();
2306 const uptr pc = StackTrace::GetCurrentPc();
2307 ForkChildAfter(thr, pc, true);
2308 FdOnFork(thr, pc);
2309}
2310
2311#if !SANITIZER_IOS
2312TSAN_INTERCEPTOR(int, vfork, int fake) {
2313 // Some programs (e.g. openjdk) call close for all file descriptors
2314 // in the child process. Under tsan it leads to false positives, because
2315 // address space is shared, so the parent process also thinks that
2316 // the descriptors are closed (while they are actually not).
2317 // This leads to false positives due to missed synchronization.
2318 // Strictly speaking, this is undefined behavior, because the vfork child is not
2319 // allowed to call any functions other than exec/exit. But this is what
2320 // openjdk does, so we want to handle it.
2321 // We could disable interceptors in the child process. But it's not possible
2322 // to simply intercept and wrap vfork, because vfork child is not allowed
2323 // to return from the function that calls vfork, and that's exactly what
2324 // we would do. So this would require some assembly trickery as well.
2325 // Instead we simply turn vfork into fork.
2326 return WRAP(fork)(fake);
2327}
2328#endif
2329
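// clone() is modeled like fork(): ForkBefore/ForkParentAfter bracket the real
// call in the parent, and the child runs the user callback through a small
// wrapper that first re-initializes the runtime via ForkChildAfter/FdOnFork.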
2330#if SANITIZER_LINUX
2331TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2332 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2333 SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2334 child_tid);
2335 struct Arg {
2336 int (*fn)(void *);
2337 void *arg;
2338 };
2339 auto wrapper = +[](void *p) -> int {
2340 auto *thr = cur_thread();
2341 uptr pc = GET_CURRENT_PC();
2342 // Start the background thread for fork, but not for clone.
2343 // For fork we have always done this and it's known to work (or user code has
2344 // adapted to it). But if we do this for the new clone interceptor, some code
2345 // (sandbox2) fails. So keep the behavior we have used for years and don't start
2346 // the background thread after clone.
2347 ForkChildAfter(thr, pc, false);
2348 FdOnFork(thr, pc);
2349 auto *arg = static_cast<Arg *>(p);
2350 return arg->fn(arg->arg);
2351 };
2352 ForkBefore(thr, pc);
2353 Arg arg_wrapper = {fn, arg};
2354 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2355 child_tid);
2356 ForkParentAfter(thr, pc);
2357 return pid;
2358}
2359#endif
2360
2361#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2362typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2363 void *data);
2364struct dl_iterate_phdr_data {
2365 ThreadState *thr;
2366 uptr pc;
2367 dl_iterate_phdr_cb_t cb;
2368 void *data;
2369};
2370
2371static bool IsAppNotRodata(uptr addr) {
2372 return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2373}
2374
2375static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2376 void *data) {
2377 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2378 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2379 // accessible in dl_iterate_phdr callback. But we don't see synchronization
2380 // inside of dynamic linker, so we "unpoison" it here in order to not
2381 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2382 // because some libc functions call __libc_dlopen.
2383 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2384 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2385 internal_strlen(info->dlpi_name));
2386 int res = cbdata->cb(info, size, cbdata->data);
2387 // Perform the check one more time in case info->dlpi_name was overwritten
2388 // by user callback.
2389 if (info && IsAppNotRodata((uptr)info->dlpi_name))
2390 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2391 internal_strlen(info->dlpi_name));
2392 return res;
2393}
2394
2395TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2396 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2397 dl_iterate_phdr_data cbdata;
2398 cbdata.thr = thr;
2399 cbdata.pc = pc;
2400 cbdata.cb = cb;
2401 cbdata.data = data;
2402 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2403 return res;
2404}
2405#endif
2406
2407static int OnExit(ThreadState *thr) {
2408 int status = Finalize(thr);
2409 FlushStreams();
2410 return status;
2411}
2412
2413#if !SANITIZER_APPLE
2414static void HandleRecvmsg(ThreadState *thr, uptr pc,
2415 __sanitizer_msghdr *msg) {
2416 int fds[64];
2417 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2418 for (int i = 0; i < cnt; i++)
2419 FdEventCreate(thr, pc, fds[i]);
2420}
2421#endif
2422
2423#include "sanitizer_common/sanitizer_platform_interceptors.h"
2424// Causes interceptor recursion (getaddrinfo() and fopen())
2425#undef SANITIZER_INTERCEPT_GETADDRINFO
2426// We define our own.
2427#if SANITIZER_INTERCEPT_TLS_GET_ADDR
2428#define NEED_TLS_GET_ADDR
2429#endif
2430#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2431#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2432#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2433
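// The macros below parameterize the generic interceptors from
// sanitizer_common_interceptors.inc, mapping its hooks onto tsan primitives:
// Acquire/Release for synchronization, Fd* for descriptor tracking, and the
// mmap/munmap and OnExit handling specific to this runtime.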
2434#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
2435 INTERCEPT_FUNCTION_VER(name, ver)
2436#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2437 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2438
2439#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2440 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
2441 TsanInterceptorContext _ctx = {thr, pc}; \
2442 ctx = (void *)&_ctx; \
2443 (void)ctx;
2444
2445#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2446 if (path) \
2447 Acquire(thr, pc, File2addr(path)); \
2448 if (file) { \
2449 int fd = fileno_unlocked(file); \
2450 if (fd >= 0) FdFileCreate(thr, pc, fd); \
2451 }
2452
2453#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2454 if (file) { \
2455 int fd = fileno_unlocked(file); \
2456 FdClose(thr, pc, fd); \
2457 }
2458
2459#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2460 ({ \
2461 CheckNoDeepBind(filename, flag); \
2462 ThreadIgnoreBegin(thr, 0); \
2463 void *res = REAL(dlopen)(filename, flag); \
2464 ThreadIgnoreEnd(thr); \
2465 res; \
2466 })
2467
2468// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
2469 // (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
2470 // intercepted calls, which can cause deadlocks with ReportRace(), which also
2471 // uses this code.
2472#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2473 ({ \
2474 ScopedIgnoreInterceptors ignore_interceptors; \
2475 libignore()->OnLibraryLoaded(filename); \
2476 })
2477
2478#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2479 ({ \
2480 ScopedIgnoreInterceptors ignore_interceptors; \
2481 libignore()->OnLibraryUnloaded(); \
2482 })
2483
2484#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2485 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2486
2487#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2488 Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2489
2490#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2491 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2492
2493#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2494 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2495
2496#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2497 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2498
2499#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2500 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2501
2502#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2503 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2504
2505#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2506 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2507
2508#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2509 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2510 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
2511 else \
2512 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2513
2514#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2515
2516#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2517 OnExit(((TsanInterceptorContext *) ctx)->thr)
2518
2519#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
2520 off) \
2521 do { \
2522 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2523 off); \
2524 } while (false)
2525
2526#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
2527 do { \
2528 return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2529 } while (false)
2530
2531#if !SANITIZER_APPLE
2532#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2533 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2534 ((TsanInterceptorContext *)ctx)->pc, msg)
2535#endif
2536
2537#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
2538 if (TsanThread *t = GetCurrentThread()) { \
2539 *begin = t->tls_begin(); \
2540 *end = t->tls_end(); \
2541 } else { \
2542 *begin = *end = 0; \
2543 }
2544
2545#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2546 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2547
2548#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2549 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2550
2551#include "sanitizer_common/sanitizer_common_interceptors.inc"
2552
2553static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2554 __sanitizer_sigaction *old);
2555static __sanitizer_sighandler_ptr signal_impl(int sig,
2556 __sanitizer_sighandler_ptr h);
2557
2558#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2559 { return sigaction_impl(signo, act, oldact); }
2560
2561#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2562 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2563
2564#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2565
2566#include "sanitizer_common/sanitizer_signal_interceptors.inc"
2567
2568int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2569 __sanitizer_sigaction *old) {
2570 // Note: if we call REAL(sigaction) directly for any reason without proxying
2571 // the signal handler through sighandler, very bad things will happen.
2572 // The handler will run synchronously and corrupt tsan per-thread state.
2573 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2574 if (sig <= 0 || sig >= kSigCount) {
2575 errno = errno_EINVAL;
2576 return -1;
2577 }
2578 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2579 __sanitizer_sigaction old_stored;
2580 if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2581 __sanitizer_sigaction newact;
2582 if (act) {
2583 // Copy act into sigactions[sig].
2584 // Can't use struct copy, because compiler can emit call to memcpy.
2585 // Can't use internal_memcpy, because it copies byte-by-byte,
2586 // and signal handler reads the handler concurrently. It can read
2587 // some bytes from old value and some bytes from new value.
2588 // Use volatile to prevent insertion of memcpy.
2589 sigactions[sig].handler =
2590 *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2591 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2592 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2593 sizeof(sigactions[sig].sa_mask));
2594#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2595 sigactions[sig].sa_restorer = act->sa_restorer;
2596#endif
2597 internal_memcpy(&newact, act, sizeof(newact));
2598 internal_sigfillset(&newact.sa_mask);
2599 if ((act->sa_flags & SA_SIGINFO) ||
2600 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2601 newact.sa_flags |= SA_SIGINFO;
2602 newact.sigaction = sighandler;
2603 }
2604 ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2605 act = &newact;
2606 }
2607 int res = REAL(sigaction)(sig, act, old);
2608 if (res == 0 && old && old->sigaction == sighandler)
2609 internal_memcpy(old, &old_stored, sizeof(*old));
2610 return res;
2611}
2612
2613static __sanitizer_sighandler_ptr signal_impl(int sig,
2614 __sanitizer_sighandler_ptr h) {
2615 __sanitizer_sigaction act;
2616 act.handler = h;
2617 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2618 act.sa_flags = 0;
2619 __sanitizer_sigaction old;
2620 int res = sigaction_symname(sig, &act, &old);
2621 if (res) return (__sanitizer_sighandler_ptr)sig_err;
2622 return old.handler;
2623}
2624
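// Hooks for the raw syscall wrappers in sanitizer_common_syscalls.inc.
// TSAN_SYSCALL() skips processing when interceptors are ignored for this
// thread and makes sure pending signals are delivered when the wrapped
// syscall returns (via the ScopedSyscall destructor).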
2625#define TSAN_SYSCALL() \
2626 ThreadState *thr = cur_thread(); \
2627 if (thr->ignore_interceptors) \
2628 return; \
2629 ScopedSyscall scoped_syscall(thr)
2630
2631struct ScopedSyscall {
2632 ThreadState *thr;
2633
2634 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2635
2636 ~ScopedSyscall() {
2637 ProcessPendingSignals(thr);
2638 }
2639};
2640
2641#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2642static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2643 TSAN_SYSCALL();
2644 MemoryAccessRange(thr, pc, p, s, write);
2645}
2646
2647static USED void syscall_acquire(uptr pc, uptr addr) {
2648 TSAN_SYSCALL();
2649 Acquire(thr, pc, addr);
2650 DPrintf("syscall_acquire(0x%zx))\n", addr);
2651}
2652
2653static USED void syscall_release(uptr pc, uptr addr) {
2654 TSAN_SYSCALL();
2655 DPrintf("syscall_release(0x%zx)\n", addr);
2656 Release(thr, pc, addr);
2657}
2658
2659static void syscall_fd_close(uptr pc, int fd) {
2660 auto *thr = cur_thread();
2661 FdClose(thr, pc, fd);
2662}
2663
2664static USED void syscall_fd_acquire(uptr pc, int fd) {
2665 TSAN_SYSCALL();
2666 FdAcquire(thr, pc, fd);
2667 DPrintf("syscall_fd_acquire(%d)\n", fd);
2668}
2669
2670static USED void syscall_fd_release(uptr pc, int fd) {
2671 TSAN_SYSCALL();
2672 DPrintf("syscall_fd_release(%d)\n", fd);
2673 FdRelease(thr, pc, fd);
2674}
2675
2676 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2677
2678static void syscall_post_fork(uptr pc, int pid) {
2679 ThreadState *thr = cur_thread();
2680 if (pid == 0) {
2681 // child
2682 ForkChildAfter(thr, pc, true);
2683 FdOnFork(thr, pc);
2684 } else if (pid > 0) {
2685 // parent
2686 ForkParentAfter(thr, pc);
2687 } else {
2688 // error
2689 ForkParentAfter(thr, pc);
2690 }
2691}
2692#endif
2693
2694#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2695 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2696
2697#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2698 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2699
2700#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2701 do { \
2702 (void)(p); \
2703 (void)(s); \
2704 } while (false)
2705
2706#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2707 do { \
2708 (void)(p); \
2709 (void)(s); \
2710 } while (false)
2711
2712#define COMMON_SYSCALL_ACQUIRE(addr) \
2713 syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2714
2715#define COMMON_SYSCALL_RELEASE(addr) \
2716 syscall_release(GET_CALLER_PC(), (uptr)(addr))
2717
2718#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2719
2720#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2721
2722#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2723
2724#define COMMON_SYSCALL_PRE_FORK() \
2725 syscall_pre_fork(GET_CALLER_PC())
2726
2727#define COMMON_SYSCALL_POST_FORK(res) \
2728 syscall_post_fork(GET_CALLER_PC(), res)
2729
2730#include "sanitizer_common/sanitizer_common_syscalls.inc"
2731#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2732
2733#ifdef NEED_TLS_GET_ADDR
2734
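// Called from the __tls_get_addr interceptors below. DTLS_on_tls_get_addr
// reports a freshly allocated dynamic TLS block (if any); since the
// allocation happens inside the dynamic linker where tsan sees no
// synchronization, the new block's shadow is reset to avoid false reports.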
2735static void handle_tls_addr(void *arg, void *res) {
2736 ThreadState *thr = cur_thread();
2737 if (!thr)
2738 return;
2739 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2740 thr->tls_addr + thr->tls_size);
2741 if (!dtv)
2742 return;
2743 // New DTLS block has been allocated.
2744 MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2745}
2746
2747#if !SANITIZER_S390
2748// Define own interceptor instead of sanitizer_common's for three reasons:
2749// 1. It must not process pending signals.
2750// Signal handlers may contain MOVDQA instruction (see below).
2751// 2. It must be as simple as possible to not contain MOVDQA.
2752// 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2753// is empty for tsan (meant only for msan).
2754// Note: __tls_get_addr can be called with mis-aligned stack due to:
2755// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2756 // So the interceptor must work with a mis-aligned stack; in particular, it must
2757 // not execute MOVDQA with stack addresses.
2758TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2759 void *res = REAL(__tls_get_addr)(arg);
2760 handle_tls_addr(arg, res);
2761 return res;
2762}
2763#else // SANITIZER_S390
2764TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2765 uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2766 char *tp = static_cast<char *>(__builtin_thread_pointer());
2767 handle_tls_addr(arg, res + tp);
2768 return res;
2769}
2770#endif
2771#endif
2772
2773#if SANITIZER_NETBSD
2774TSAN_INTERCEPTOR(void, _lwp_exit) {
2775 SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2776 DestroyThreadState();
2777 REAL(_lwp_exit)();
2778}
2779#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2780#else
2781#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2782#endif
2783
2784#if SANITIZER_FREEBSD
2785TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2786 SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2787 DestroyThreadState();
2788 REAL(thr_exit(state));
2789}
2790#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2791#else
2792#define TSAN_MAYBE_INTERCEPT_THR_EXIT
2793#endif
2794
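// FreeBSD and NetBSD libc export the pthread primitives under additional
// internal names; the alias interceptors below (expanded by the
// TSAN_INTERCEPTOR_*_ALIAS macros) forward those spellings to the pthread_*
// interceptors defined earlier in this file.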
2795TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2796TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2797TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2798TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2799TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2800TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2801TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2802TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2803TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2804TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2805TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2806TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2807TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2808TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2809TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2810TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2811TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2812TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2813TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2814
2815TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2816TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2817TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2818TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2819TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2820TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2821TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2822TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2823TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2824TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2825TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2826TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2827TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2828TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2829TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2830TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2831TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2832TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2833TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2834 void *c)
2835
2836namespace __tsan {
2837
2838static void finalize(void *arg) {
2839 ThreadState *thr = cur_thread();
2840 int status = Finalize(thr);
2841 // Make sure the output is not lost.
2842 FlushStreams();
2843 if (status)
2844 Die();
2845}
2846
2847#if !SANITIZER_APPLE && !SANITIZER_ANDROID
2848static void unreachable() {
2849 Report("FATAL: ThreadSanitizer: unreachable called\n");
2850 Die();
2851}
2852#endif
2853
2854// Define default implementation since interception of libdispatch is optional.
2855SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2856
2857void InitializeInterceptors() {
2858#if !SANITIZER_APPLE
2859 // We need to set it up early, because functions like dlsym() can call it.
2860 REAL(memset) = internal_memset;
2861 REAL(memcpy) = internal_memcpy;
2862#endif
2863
2864 new(interceptor_ctx()) InterceptorContext();
2865
2866 InitializeCommonInterceptors();
2867 InitializeSignalInterceptors();
2868 InitializeLibdispatchInterceptors();
2869
2870#if !SANITIZER_APPLE
2871 InitializeSetjmpInterceptors();
2872#endif
2873
2874 TSAN_INTERCEPT(longjmp_symname);
2875 TSAN_INTERCEPT(siglongjmp_symname);
2876#if SANITIZER_NETBSD
2877 TSAN_INTERCEPT(_longjmp);
2878#endif
2879
2880 TSAN_INTERCEPT(malloc);
2881 TSAN_INTERCEPT(__libc_memalign);
2882 TSAN_INTERCEPT(calloc);
2883 TSAN_INTERCEPT(realloc);
2884 TSAN_INTERCEPT(reallocarray);
2885 TSAN_INTERCEPT(free);
2886 TSAN_INTERCEPT(cfree);
2887 TSAN_INTERCEPT(munmap);
2888 TSAN_MAYBE_INTERCEPT_MEMALIGN;
2889 TSAN_INTERCEPT(valloc);
2890 TSAN_MAYBE_INTERCEPT_PVALLOC;
2891 TSAN_INTERCEPT(posix_memalign);
2892
2893 TSAN_INTERCEPT(strcpy);
2894 TSAN_INTERCEPT(strncpy);
2895 TSAN_INTERCEPT(strdup);
2896
2897 TSAN_INTERCEPT(pthread_create);
2898 TSAN_INTERCEPT(pthread_join);
2899 TSAN_INTERCEPT(pthread_detach);
2900 TSAN_INTERCEPT(pthread_exit);
2901 #if SANITIZER_LINUX
2902 TSAN_INTERCEPT(pthread_tryjoin_np);
2903 TSAN_INTERCEPT(pthread_timedjoin_np);
2904 #endif
2905
2906 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2907 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2908 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2909 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2910 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2911 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2912
2913 TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2914
2915 TSAN_INTERCEPT(pthread_mutex_init);
2916 TSAN_INTERCEPT(pthread_mutex_destroy);
2917 TSAN_INTERCEPT(pthread_mutex_lock);
2918 TSAN_INTERCEPT(pthread_mutex_trylock);
2919 TSAN_INTERCEPT(pthread_mutex_timedlock);
2920 TSAN_INTERCEPT(pthread_mutex_unlock);
2921#if SANITIZER_LINUX
2922 TSAN_INTERCEPT(pthread_mutex_clocklock);
2923#endif
2924#if SANITIZER_GLIBC
2925# if !__GLIBC_PREREQ(2, 34)
2926 TSAN_INTERCEPT(__pthread_mutex_lock);
2927 TSAN_INTERCEPT(__pthread_mutex_unlock);
2928# endif
2929#endif
2930
2931 TSAN_INTERCEPT(pthread_spin_init);
2932 TSAN_INTERCEPT(pthread_spin_destroy);
2933 TSAN_INTERCEPT(pthread_spin_lock);
2934 TSAN_INTERCEPT(pthread_spin_trylock);
2935 TSAN_INTERCEPT(pthread_spin_unlock);
2936
2937 TSAN_INTERCEPT(pthread_rwlock_init);
2938 TSAN_INTERCEPT(pthread_rwlock_destroy);
2939 TSAN_INTERCEPT(pthread_rwlock_rdlock);
2940 TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2941 TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2942 TSAN_INTERCEPT(pthread_rwlock_wrlock);
2943 TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2944 TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2945 TSAN_INTERCEPT(pthread_rwlock_unlock);
2946
2947 TSAN_INTERCEPT(pthread_barrier_init);
2948 TSAN_INTERCEPT(pthread_barrier_destroy);
2949 TSAN_INTERCEPT(pthread_barrier_wait);
2950
2951 TSAN_INTERCEPT(pthread_once);
2952
2953 TSAN_INTERCEPT(fstat);
2954 TSAN_MAYBE_INTERCEPT___FXSTAT;
2955 TSAN_MAYBE_INTERCEPT_FSTAT64;
2956 TSAN_MAYBE_INTERCEPT___FXSTAT64;
2957 TSAN_INTERCEPT(open);
2958 TSAN_MAYBE_INTERCEPT_OPEN64;
2959 TSAN_INTERCEPT(creat);
2960 TSAN_MAYBE_INTERCEPT_CREAT64;
2961 TSAN_INTERCEPT(dup);
2962 TSAN_INTERCEPT(dup2);
2963 TSAN_INTERCEPT(dup3);
2964 TSAN_MAYBE_INTERCEPT_EVENTFD;
2965 TSAN_MAYBE_INTERCEPT_SIGNALFD;
2966 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2967 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2968 TSAN_INTERCEPT(socket);
2969 TSAN_INTERCEPT(socketpair);
2970 TSAN_INTERCEPT(connect);
2971 TSAN_INTERCEPT(bind);
2972 TSAN_INTERCEPT(listen);
2973 TSAN_MAYBE_INTERCEPT_EPOLL;
2974 TSAN_INTERCEPT(close);
2975 TSAN_MAYBE_INTERCEPT___CLOSE;
2976 TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2977 TSAN_INTERCEPT(pipe);
2978 TSAN_INTERCEPT(pipe2);
2979
2980 TSAN_INTERCEPT(unlink);
2981 TSAN_INTERCEPT(tmpfile);
2982 TSAN_MAYBE_INTERCEPT_TMPFILE64;
2983 TSAN_INTERCEPT(abort);
2984 TSAN_INTERCEPT(rmdir);
2985 TSAN_INTERCEPT(closedir);
2986
2987 TSAN_INTERCEPT(sigsuspend);
2988 TSAN_INTERCEPT(sigblock);
2989 TSAN_INTERCEPT(sigsetmask);
2990 TSAN_INTERCEPT(pthread_sigmask);
2991 TSAN_INTERCEPT(raise);
2992 TSAN_INTERCEPT(kill);
2993 TSAN_INTERCEPT(pthread_kill);
2994 TSAN_INTERCEPT(sleep);
2995 TSAN_INTERCEPT(usleep);
2996 TSAN_INTERCEPT(nanosleep);
2997 TSAN_INTERCEPT(pause);
2998 TSAN_INTERCEPT(gettimeofday);
2999 TSAN_INTERCEPT(getaddrinfo);
3000
3001 TSAN_INTERCEPT(fork);
3002 TSAN_INTERCEPT(vfork);
3003#if SANITIZER_LINUX
3004 TSAN_INTERCEPT(clone);
3005#endif
3006#if !SANITIZER_ANDROID
3007 TSAN_INTERCEPT(dl_iterate_phdr);
3008#endif
3009 TSAN_MAYBE_INTERCEPT_ON_EXIT;
3010 TSAN_INTERCEPT(__cxa_atexit);
3011 TSAN_INTERCEPT(_exit);
3012
3013#ifdef NEED_TLS_GET_ADDR
3014#if !SANITIZER_S390
3015 TSAN_INTERCEPT(__tls_get_addr);
3016#else
3017 TSAN_INTERCEPT(__tls_get_addr_internal);
3018 TSAN_INTERCEPT(__tls_get_offset);
3019#endif
3020#endif
3021
3022 TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3023 TSAN_MAYBE_INTERCEPT_THR_EXIT;
3024
3025#if !SANITIZER_APPLE && !SANITIZER_ANDROID
3026 // Need to set it up, because interceptors check that the function is resolved.
3027 // But atexit is emitted directly into the module, so it can't be resolved.
3028 REAL(atexit) = (int(*)(void(*)()))unreachable;
3029#endif
3030
3031 if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3032 Printf("ThreadSanitizer: failed to setup atexit callback\n");
3033 Die();
3034 }
3035 if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
3036 Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
3037 Die();
3038 }
3039
3040#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3041 if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
3042 Printf("ThreadSanitizer: failed to create thread key\n");
3043 Die();
3044 }
3045#endif
3046
3047 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3048 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3049 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3050 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3051 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3052 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3053 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3054 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3055 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3056 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3057 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3058 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3059 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3060 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3061 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3062 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3063 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3064 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3065 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3066
3067 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3068 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3069 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3070 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3071 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3072 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3073 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3074 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3075 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3076 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3077 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3078 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3079 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3080 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3081 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3082 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3083 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3084 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3085 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3086
3087 FdInit();
3088}
3089
3090} // namespace __tsan
3091
3092// Invisible barrier for tests.
3093// There were several unsuccessful iterations for this functionality:
3094// 1. Initially it was implemented in user code using
3095// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3096// MacOS. Futexes are linux-specific for this matter.
3097// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3098// "as-if synchronized via sleep" messages in reports which failed some
3099// output tests.
3100// 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
3101 // visible events, which led to "failed to restore stack trace" failures.
3102// Note that no_sanitize_thread attribute does not turn off atomic interception
3103// so attaching it to the function defined in user code does not help.
3104// That's why we now have what we have.
3105constexpr u32 kBarrierThreadBits = 10;
3106constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3107
3108extern "C" {
3109
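// Barrier encoding used below: the low kBarrierThreadBits bits store the
// participant count, the remaining bits count how many waits have been issued
// in total, and barrier_epoch() == waits / count. For example, with
// num_threads == 4 the fourth arrival bumps the epoch from 0 to 1 and wakes
// the futex waiters; the other three loop in FutexWait until they observe the
// new epoch.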
3110SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3111 atomic_uint32_t *barrier, u32 num_threads) {
3112 if (num_threads >= kBarrierThreads) {
3113 Printf("barrier_init: count is too large (%d)\n", num_threads);
3114 Die();
3115 }
3116 // kBarrierThreadBits lsb is thread count,
3117 // the remaining are count of entered threads.
3118 atomic_store(barrier, num_threads, memory_order_relaxed);
3119}
3120
3121static u32 barrier_epoch(u32 value) {
3122 return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3123}
3124
3125SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3126 atomic_uint32_t *barrier) {
3127 u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3128 u32 old_epoch = barrier_epoch(old);
3129 if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3130 FutexWake(barrier, (1 << 30));
3131 return;
3132 }
3133 for (;;) {
3134 u32 cur = atomic_load(barrier, memory_order_relaxed);
3135 if (barrier_epoch(cur) != old_epoch)
3136 return;
3137 FutexWait(barrier, cur);
3138 }
3139}
3140
3141} // extern "C"
3142
