//===-- tsan_interceptors_mac.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_APPLE

# include <errno.h>
# include <libkern/OSAtomic.h>
# include <objc/objc-sync.h>
# include <os/lock.h>
# include <sys/ucontext.h>

# include "interception/interception.h"
# include "sanitizer_common/sanitizer_addrhashmap.h"
# include "tsan_interceptors.h"
# include "tsan_interface.h"
# include "tsan_interface_ann.h"

# if defined(__has_include) && __has_include(<xpc/xpc.h>)
#  include <xpc/xpc.h>
# endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

typedef long long_t;

extern "C" {
int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
int setcontext(const ucontext_t *ucp);
}

namespace __tsan {
// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// aliases of each other, so we cannot install different interceptors for
// them: they are the same function. Thus, we have to stay conservative and
// treat the non-barrier versions as mo_acq_rel as well.
static constexpr morder kMacOrderBarrier = mo_acq_rel;
static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
static constexpr morder kMacFailureOrder = mo_relaxed;

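// Helper macros that generate the OSAtomic* interceptor bodies. Each forwards
// to the matching __tsan_atomic* builtin. The __tsan_atomic*_fetch_* builtins
// return the *old* value, while the non-"Orig" OSAtomic* entry points return
// the *new* one, hence the "+ x" / "+ 1" / "- 1" adjustments below; the plain
// OSATOMIC_INTERCEPTOR (used for the "Orig" variants) returns the fetched
// value unchanged.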
# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                \
      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                \
      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);               \
    }

# define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                   \
      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                   \
      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;              \
    }

# define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
    TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                        \
      SCOPED_TSAN_INTERCEPTOR(f, ptr);                                      \
      return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;              \
    }

# define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                      mo)                                    \
    TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                         \
      SCOPED_TSAN_INTERCEPTOR(f, ptr);                                       \
      return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;               \
    }

# define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)       \
    m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
      kMacOrderNonBarrier)                                           \
    m(int32_t, int32_t, a32, f##32##Barrier,                         \
      __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)             \
    m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
      kMacOrderNonBarrier)                                           \
    m(int64_t, int64_t, a64, f##64##Barrier,                         \
      __tsan_atomic64_##tsan_atomic_f, kMacOrderBarrier)

# define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)   \
    m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
      kMacOrderNonBarrier)                                            \
    m(int32_t, uint32_t, a32, f##32##Barrier,                         \
      __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)              \
    m_orig(int32_t, uint32_t, a32, f##32##Orig,                       \
           __tsan_atomic32_##tsan_atomic_f, kMacOrderNonBarrier)      \
    m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                \
           __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)

# pragma clang diagnostic push  // OSAtomic* deprecation
# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
                                 OSATOMIC_INTERCEPTOR_MINUS_1)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
                              OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
# pragma clang diagnostic pop  // OSAtomic* deprecation

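// OSAtomicCompareAndSwap* return true iff the swap happened. They map onto
// __tsan_atomic*_compare_exchange_strong; `old_value` is a by-value
// parameter, so letting compare_exchange write the observed value back into
// it on failure is harmless.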
# define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)            \
    TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
      SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);               \
      return tsan_atomic_f##_compare_exchange_strong(                      \
          (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
          kMacOrderNonBarrier, kMacFailureOrder);                          \
    }                                                                      \
                                                                           \
    TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,           \
                     t volatile *ptr) {                                    \
      SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);      \
      return tsan_atomic_f##_compare_exchange_strong(                      \
          (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
          kMacOrderBarrier, kMacFailureOrder);                             \
    }

# pragma clang diagnostic push  // OSAtomicCompareAndSwap* deprecation
# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)
# pragma clang diagnostic pop  // OSAtomicCompareAndSwap* deprecation

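// OSAtomicTestAndSet/Clear address bits in MSB-first order: bit 0 is the most
// significant bit of the byte at `ptr`, bit 8 the MSB of the following byte,
// and so on (this matches the OSAtomicTestAndSet man page). Hence the byte
// index `n >> 3` and the mask `0x80 >> (n & 7)`. The whole operation is
// modeled as an 8-bit atomic fetch_or/fetch_and on the containing byte.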
# define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)              \
    TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {    \
      SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                          \
      volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
      char bit = 0x80u >> (n & 7);                                 \
      char mask = clear ? ~bit : bit;                              \
      char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);      \
      return orig_byte & bit;                                      \
    }

# define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)                \
    OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
    OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)

# pragma clang diagnostic push  // OSAtomicTestAnd* deprecation
# pragma clang diagnostic ignored "-Wdeprecated-declarations"
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)
# pragma clang diagnostic pop  // OSAtomicTestAnd* deprecation

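// The OSAtomic queue primitives are modeled as message passing: the enqueuer
// releases the item before insertion and the dequeuer acquires it after
// removal, so writes to the item made before OSAtomicEnqueue happen-before
// reads of it after the matching OSAtomicDequeue.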
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  if (item)
    __tsan_acquire(item);
  return item;
}

// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only available on macOS.
# if !SANITIZER_IOS

TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item)
    __tsan_acquire(item);
  return item;
}

# endif  // !SANITIZER_IOS

// If `OSSPINLOCK_USE_INLINED=1` is set, the SDK headers declare these not as
// functions but as macros that expand to calls to non-deprecated APIs.
// Undefine these macros so they don't interfere with the interceptor
// machinery.
# undef OSSpinLockLock
# undef OSSpinLockTry
# undef OSSpinLockUnlock

# pragma clang diagnostic push  // OSSpinLock* deprecation
# pragma clang diagnostic ignored "-Wdeprecated-declarations"

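// These locks can be taken very early in a thread's life, before the
// per-thread TSan state is initialized; in that case we skip the lock
// modeling and just call the real function.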
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockTry)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
  bool result = REAL(OSSpinLockTry)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}
# pragma clang diagnostic pop  // OSSpinLock* deprecation

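// The legacy os_lock_* API and os_unfair_lock are modeled the same way as
// OSSpinLock above: Acquire after a successful lock, Release before unlock.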
TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
  bool result = REAL(os_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}

TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock);
  REAL(os_unfair_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
                 u32 options) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock_with_options)(lock, options);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options);
  REAL(os_unfair_lock_lock_with_options)(lock, options);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock);
  bool result = REAL(os_unfair_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_unfair_lock_unlock)(lock);
}

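// XPC interceptors: a Release on the connection when a handler is registered
// (or a message/barrier is sent) pairs with an Acquire inside the wrapped
// handler before user code runs, so state set up prior to registration is
// visible inside the handler or reply block.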
# if defined(__has_include) && __has_include(<xpc/xpc.h>)

TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}

# endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and do not
// have an associated memory allocation. The Obj-C runtime uses tagged pointers
// to transparently optimize small objects.
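// The mask below covers both known tag placements: the Obj-C runtime marks
// tagged pointers with the most significant bit on arm64 and the least
// significant bit on x86_64, so checking either bit suffices.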
static bool IsTaggedObjCPointer(id obj) {
  const uptr kPossibleTaggedBits = 0x8000000000000001ull;
  return ((uptr)obj & kPossibleTaggedBits) != 0;
}

// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a
// valid address in the process space. We do a small allocation here to obtain
// a stable address (the array backing the hash map can change). The memory is
// never freed (intentionally leaked), and allocation and locking are slow, but
// this code only runs for @synchronized with tagged pointers, which is very
// rare.
static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
  typedef AddrHashMap<uptr, 5> Map;
  static Map Addresses;
  Map::Handle h(&Addresses, addr);
  if (h.created()) {
    ThreadIgnoreBegin(thr, pc);
    *h = (uptr)user_alloc(thr, pc, /*size=*/1);
    ThreadIgnoreEnd(thr);
  }
  return *h;
}

// Returns an address on which we can synchronize, given an Obj-C object
// pointer. For normal object pointers, this is just the address of the object
// in memory. Tagged pointers are not backed by an actual memory allocation, so
// we need to synthesize a valid address.
static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
  if (IsTaggedObjCPointer(obj))
    return GetOrCreateSyncAddress((uptr)obj, thr, pc);
  return (uptr)obj;
}

TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
  if (!obj)
    return REAL(objc_sync_enter)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
  int result = REAL(objc_sync_enter)(obj);
  CHECK_EQ(result, OBJC_SYNC_SUCCESS);
  MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
  return result;
}

TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
  if (!obj)
    return REAL(objc_sync_exit)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexUnlock(thr, pc, addr);
  int result = REAL(objc_sync_exit)(obj);
  if (result != OBJC_SYNC_SUCCESS)
    MutexInvalidAccess(thr, pc, addr);
  return result;
}

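// swapcontext() on Darwin is essentially getcontext() followed by
// setcontext(); the UCF_SWAPPED bit stashed in uc_onstack distinguishes the
// initial return of getcontext() from the later return via setcontext().
// Interceptor processing is suppressed across the getcontext() call so the
// context switch does not confuse TSan's per-thread state.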
TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
  {
    SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
  }
  // Because of swapcontext() semantics, we have no option but to copy its
  // implementation here.
  if (!oucp || !ucp) {
    errno = EINVAL;
    return -1;
  }
  ThreadState *thr = cur_thread();
  const int UCF_SWAPPED = 0x80000000;
  oucp->uc_onstack &= ~UCF_SWAPPED;
  thr->ignore_interceptors++;
  int ret = getcontext(oucp);
  if (!(oucp->uc_onstack & UCF_SWAPPED)) {
    thr->ignore_interceptors--;
    if (!ret) {
      oucp->uc_onstack |= UCF_SWAPPED;
      ret = setcontext(ucp);
    }
  }
  return ret;
}

// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
# define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR

namespace {
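// Mirrors the in-memory layout of libc++'s __shared_weak_count: two 64-bit
// reference counts followed by a vtable. The _unused_ entries pad the vtable
// so that on_zero_shared() and on_zero_shared_weak() land at the same vtable
// offsets as in the real class, letting us invoke them through this fake type.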
struct fake_shared_weak_count {
  volatile a64 shared_owners;
  volatile a64 shared_weak_owners;
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
  virtual ~fake_shared_weak_count() = 0;  // suppress -Wnon-virtual-dtor
};
}  // namespace

// The following code adds libc++ interceptors for:
//     void __shared_weak_count::__release_shared() _NOEXCEPT;
//     bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics inside
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implement the functions entirely so
// that the mo_acq_rel semantics of the atomic decrement are visible to TSan.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because that would race between the
// sync and the destruction of the object. Calling both under a lock will not
// work either, because the destructor can invoke this interceptor again (and
// even on a different thread, so recursive locks don't help).

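// The decrement uses mo_release; when it drops the count to zero, an explicit
// Acquire on the counter's address pairs with the release-decrements done by
// other threads, establishing happens-before between all owners' final
// accesses and the destructor. (shared_owners stores the owner count minus
// one, so fetch_add(-1) returning 0 means this was the last reference.)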
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}

STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}

namespace {
struct call_once_callback_args {
  void (*orig_func)(void *arg);
  void *orig_arg;
  void *flag;
};

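// Runs the user callback, then performs an explicit release on the once-flag
// so that threads observing the flag as "done" also acquire the effects of
// the callback.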
void call_once_callback_wrapper(void *arg) {
  call_once_callback_args *new_args = (call_once_callback_args *)arg;
  new_args->orig_func(new_args->orig_arg);
  __tsan_release(new_args->flag);
}
}  // namespace

// This adds a libc++ interceptor for:
//     void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via the internal function __call_once, which
// lives in libc++.dylib, so the atomic release store inside it is
// TSan-invisible. To avoid false positives, this interceptor wraps the
// callback function and performs an explicit Release after the user code has
// run.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}

}  // namespace __tsan

#endif  // SANITIZER_APPLE