1 | //===-- tsan_interceptors_mac.cpp -----------------------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file is a part of ThreadSanitizer (TSan), a race detector. |
10 | // |
11 | // Mac-specific interceptors. |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "sanitizer_common/sanitizer_platform.h" |
15 | #if SANITIZER_APPLE |
16 | |
17 | #include "interception/interception.h" |
18 | #include "tsan_interceptors.h" |
19 | #include "tsan_interface.h" |
20 | #include "tsan_interface_ann.h" |
21 | #include "tsan_spinlock_defs_mac.h" |
22 | #include "sanitizer_common/sanitizer_addrhashmap.h" |
23 | |
24 | #include <errno.h> |
25 | #include <libkern/OSAtomic.h> |
26 | #include <objc/objc-sync.h> |
27 | #include <os/lock.h> |
28 | #include <sys/ucontext.h> |
29 | |
30 | #if defined(__has_include) && __has_include(<xpc/xpc.h>) |
31 | #include <xpc/xpc.h> |
32 | #endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>) |
33 | |
34 | typedef long long_t; |
35 | |
36 | extern "C" { |
37 | int getcontext(ucontext_t *ucp) __attribute__((returns_twice)); |
38 | int setcontext(const ucontext_t *ucp); |
39 | } |
40 | |
41 | namespace __tsan { |
42 | |
// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// aliases of the same function, so we cannot install different interceptors
// for them. Thus, we have to stay conservative and treat the non-barrier
// versions as mo_acq_rel as well.
48 | static constexpr morder kMacOrderBarrier = mo_acq_rel; |
49 | static constexpr morder kMacOrderNonBarrier = mo_acq_rel; |
50 | static constexpr morder kMacFailureOrder = mo_relaxed; |
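
// Illustrative consequence (hypothetical user code): both calls below are
// modeled with mo_acq_rel, even though only the Barrier variant promises it:
//
//   int32_t counter = 0;
//   OSAtomicAdd32(1, &counter);         // documented as no-barrier (relaxed)
//   OSAtomicAdd32Barrier(1, &counter);  // full barrier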
51 | |
52 | #define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ |
53 | TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ |
54 | SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ |
55 | return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \ |
56 | } |
57 | |
58 | #define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ |
59 | TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \ |
60 | SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \ |
61 | return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \ |
62 | } |
63 | |
64 | #define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \ |
65 | TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ |
66 | SCOPED_TSAN_INTERCEPTOR(f, ptr); \ |
67 | return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \ |
68 | } |
69 | |
70 | #define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \ |
71 | mo) \ |
72 | TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \ |
73 | SCOPED_TSAN_INTERCEPTOR(f, ptr); \ |
74 | return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \ |
75 | } |
76 | |
77 | #define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \ |
78 | m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ |
79 | kMacOrderNonBarrier) \ |
80 | m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ |
81 | kMacOrderBarrier) \ |
82 | m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \ |
83 | kMacOrderNonBarrier) \ |
84 | m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \ |
85 | kMacOrderBarrier) |
86 | |
87 | #define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \ |
88 | m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \ |
89 | kMacOrderNonBarrier) \ |
90 | m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \ |
91 | kMacOrderBarrier) \ |
92 | m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \ |
93 | kMacOrderNonBarrier) \ |
94 | m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \ |
95 | __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier) |
96 | |
97 | OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add, |
98 | OSATOMIC_INTERCEPTOR_PLUS_X) |
99 | OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add, |
100 | OSATOMIC_INTERCEPTOR_PLUS_1) |
101 | OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub, |
102 | OSATOMIC_INTERCEPTOR_MINUS_1) |
103 | OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X, |
104 | OSATOMIC_INTERCEPTOR) |
105 | OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and, |
106 | OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) |
107 | OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor, |
108 | OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR) |
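
// For reference, the OSAtomicAdd instantiation above yields (among others) an
// interceptor roughly equivalent to this sketch. Note the "+ x": OSAtomicAdd32
// returns the new value, while __tsan_atomic32_fetch_add returns the old one.
//
//   TSAN_INTERCEPTOR(int32_t, OSAtomicAdd32, int32_t x,
//                    volatile int32_t *ptr) {
//     SCOPED_TSAN_INTERCEPTOR(OSAtomicAdd32, x, ptr);
//     return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
//                                      kMacOrderNonBarrier) + x;
//   }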
109 | |
110 | #define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \ |
111 | TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \ |
112 | SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \ |
113 | return tsan_atomic_f##_compare_exchange_strong( \ |
114 | (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ |
115 | kMacOrderNonBarrier, kMacFailureOrder); \ |
116 | } \ |
117 | \ |
118 | TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \ |
119 | t volatile *ptr) { \ |
120 | SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \ |
121 | return tsan_atomic_f##_compare_exchange_strong( \ |
122 | (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \ |
123 | kMacOrderBarrier, kMacFailureOrder); \ |
124 | } |
125 | |
126 | OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int) |
127 | OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64, |
128 | long_t) |
129 | OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64, |
130 | void *) |
131 | OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32, |
132 | int32_t) |
133 | OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64, |
134 | int64_t) |
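
// The OSAtomic CAS functions return only a success flag, and old_value is a
// by-value copy, so the write-back that compare_exchange_strong performs on
// failure stays invisible to the caller. Hypothetical mapping:
//
//   int32_t value = 0;
//   bool ok = OSAtomicCompareAndSwap32(0, 1, &value);
//   // Modeled as: __tsan_atomic32_compare_exchange_strong(
//   //     (volatile a32 *)&value, /*expected=*/&old_value, 1,
//   //     kMacOrderNonBarrier, kMacFailureOrder);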
135 | |
136 | #define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \ |
137 | TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \ |
138 | SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \ |
139 | volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \ |
140 | char bit = 0x80u >> (n & 7); \ |
141 | char mask = clear ? ~bit : bit; \ |
142 | char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \ |
143 | return orig_byte & bit; \ |
144 | } |
145 | |
146 | #define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \ |
147 | OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \ |
148 | OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier) |
149 | |
150 | OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false) |
151 | OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and, |
152 | true) |
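
// Worked example of the bit addressing above: for n = 10, the affected byte
// is ptr + 1 (10 >> 3) and the bit is 0x80 >> (10 & 7) = 0x20, i.e. bits are
// numbered MSB-first within each byte, matching the formula documented for
// OSAtomicTestAndSet.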
153 | |
154 | TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item, |
155 | size_t offset) { |
156 | SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset); |
157 | __tsan_release(item); |
158 | REAL(OSAtomicEnqueue)(list, item, offset); |
159 | } |
160 | |
161 | TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) { |
162 | SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset); |
163 | void *item = REAL(OSAtomicDequeue)(list, offset); |
164 | if (item) __tsan_acquire(item); |
165 | return item; |
166 | } |
167 | |
// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only available on macOS,
// not on iOS.
169 | #if !SANITIZER_IOS |
170 | |
171 | TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item, |
172 | size_t offset) { |
173 | SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset); |
174 | __tsan_release(item); |
175 | REAL(OSAtomicFifoEnqueue)(list, item, offset); |
176 | } |
177 | |
178 | TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list, |
179 | size_t offset) { |
180 | SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset); |
181 | void *item = REAL(OSAtomicFifoDequeue)(list, offset); |
182 | if (item) __tsan_acquire(item); |
183 | return item; |
184 | } |
185 | |
#endif  // !SANITIZER_IOS
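
// The lock interceptors below share a common pattern: call Acquire() after
// the real lock is taken and Release() before it is dropped, so that TSan
// sees the lock as a synchronization point. The is_inited/is_dead guards
// forward straight to the real implementation when the current thread's
// TSan state is not (or no longer) usable, e.g. during very early startup.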
187 | |
188 | TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) { |
189 | CHECK(!cur_thread()->is_dead); |
190 | if (!cur_thread()->is_inited) { |
191 | return REAL(OSSpinLockLock)(lock); |
192 | } |
193 | SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock); |
194 | REAL(OSSpinLockLock)(lock); |
195 | Acquire(thr, pc, (uptr)lock); |
196 | } |
197 | |
198 | TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) { |
199 | CHECK(!cur_thread()->is_dead); |
200 | if (!cur_thread()->is_inited) { |
201 | return REAL(OSSpinLockTry)(lock); |
202 | } |
203 | SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock); |
204 | bool result = REAL(OSSpinLockTry)(lock); |
205 | if (result) |
206 | Acquire(thr, pc, (uptr)lock); |
207 | return result; |
208 | } |
209 | |
210 | TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) { |
211 | CHECK(!cur_thread()->is_dead); |
212 | if (!cur_thread()->is_inited) { |
213 | return REAL(OSSpinLockUnlock)(lock); |
214 | } |
215 | SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock); |
216 | Release(thr, pc, (uptr)lock); |
217 | REAL(OSSpinLockUnlock)(lock); |
218 | } |
219 | |
220 | TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) { |
221 | CHECK(!cur_thread()->is_dead); |
222 | if (!cur_thread()->is_inited) { |
223 | return REAL(os_lock_lock)(lock); |
224 | } |
225 | SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock); |
226 | REAL(os_lock_lock)(lock); |
227 | Acquire(thr, pc, (uptr)lock); |
228 | } |
229 | |
230 | TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) { |
231 | CHECK(!cur_thread()->is_dead); |
232 | if (!cur_thread()->is_inited) { |
233 | return REAL(os_lock_trylock)(lock); |
234 | } |
235 | SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock); |
236 | bool result = REAL(os_lock_trylock)(lock); |
237 | if (result) |
238 | Acquire(thr, pc, (uptr)lock); |
239 | return result; |
240 | } |
241 | |
242 | TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) { |
243 | CHECK(!cur_thread()->is_dead); |
244 | if (!cur_thread()->is_inited) { |
245 | return REAL(os_lock_unlock)(lock); |
246 | } |
247 | SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock); |
248 | Release(thr, pc, (uptr)lock); |
249 | REAL(os_lock_unlock)(lock); |
250 | } |
251 | |
252 | TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) { |
253 | if (!cur_thread()->is_inited || cur_thread()->is_dead) { |
254 | return REAL(os_unfair_lock_lock)(lock); |
255 | } |
256 | SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock); |
257 | REAL(os_unfair_lock_lock)(lock); |
258 | Acquire(thr, pc, (uptr)lock); |
259 | } |
260 | |
261 | TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock, |
262 | u32 options) { |
263 | if (!cur_thread()->is_inited || cur_thread()->is_dead) { |
264 | return REAL(os_unfair_lock_lock_with_options)(lock, options); |
265 | } |
266 | SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options); |
267 | REAL(os_unfair_lock_lock_with_options)(lock, options); |
268 | Acquire(thr, pc, (uptr)lock); |
269 | } |
270 | |
271 | TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) { |
272 | if (!cur_thread()->is_inited || cur_thread()->is_dead) { |
273 | return REAL(os_unfair_lock_trylock)(lock); |
274 | } |
275 | SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock); |
276 | bool result = REAL(os_unfair_lock_trylock)(lock); |
277 | if (result) |
278 | Acquire(thr, pc, (uptr)lock); |
279 | return result; |
280 | } |
281 | |
282 | TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) { |
283 | if (!cur_thread()->is_inited || cur_thread()->is_dead) { |
284 | return REAL(os_unfair_lock_unlock)(lock); |
285 | } |
286 | SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock); |
287 | Release(thr, pc, (uptr)lock); |
288 | REAL(os_unfair_lock_unlock)(lock); |
289 | } |
290 | |
291 | #if defined(__has_include) && __has_include(<xpc/xpc.h>) |
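
// The XPC interceptors below all follow the same pattern: Release() on the
// connection when a handler/barrier/reply is registered or sent, and
// Acquire() on it inside a wrapper block just before the user's block runs.
// This creates a happens-before edge from the registering thread to the
// thread that eventually runs the handler.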
292 | |
293 | TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler, |
294 | xpc_connection_t connection, xpc_handler_t handler) { |
295 | SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection, |
296 | handler); |
297 | Release(thr, pc, (uptr)connection); |
298 | xpc_handler_t new_handler = ^(xpc_object_t object) { |
299 | { |
300 | SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler); |
301 | Acquire(thr, pc, (uptr)connection); |
302 | } |
303 | handler(object); |
304 | }; |
305 | REAL(xpc_connection_set_event_handler)(connection, new_handler); |
306 | } |
307 | |
308 | TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection, |
309 | dispatch_block_t barrier) { |
310 | SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier); |
311 | Release(thr, pc, (uptr)connection); |
312 | dispatch_block_t new_barrier = ^() { |
313 | { |
314 | SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier); |
315 | Acquire(thr, pc, (uptr)connection); |
316 | } |
317 | barrier(); |
318 | }; |
319 | REAL(xpc_connection_send_barrier)(connection, new_barrier); |
320 | } |
321 | |
322 | TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply, |
323 | xpc_connection_t connection, xpc_object_t message, |
324 | dispatch_queue_t replyq, xpc_handler_t handler) { |
325 | SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection, |
326 | message, replyq, handler); |
327 | Release(thr, pc, (uptr)connection); |
328 | xpc_handler_t new_handler = ^(xpc_object_t object) { |
329 | { |
330 | SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply); |
331 | Acquire(thr, pc, (uptr)connection); |
332 | } |
333 | handler(object); |
334 | }; |
335 | REAL(xpc_connection_send_message_with_reply) |
336 | (connection, message, replyq, new_handler); |
337 | } |
338 | |
339 | TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) { |
340 | SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection); |
341 | Release(thr, pc, (uptr)connection); |
342 | REAL(xpc_connection_cancel)(connection); |
343 | } |
344 | |
345 | #endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>) |
346 | |
347 | // Determines whether the Obj-C object pointer is a tagged pointer. Tagged |
348 | // pointers encode the object data directly in their pointer bits and do not |
349 | // have an associated memory allocation. The Obj-C runtime uses tagged pointers |
350 | // to transparently optimize small objects. |
351 | static bool IsTaggedObjCPointer(id obj) { |
352 | const uptr kPossibleTaggedBits = 0x8000000000000001ull; |
353 | return ((uptr)obj & kPossibleTaggedBits) != 0; |
354 | } |
355 | |
// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a
// valid address in the process space. We do a small allocation here to obtain
// a stable address (the array backing the hash map can change). The memory is
// never freed (intentionally leaked), and allocation and locking are slow,
// but this code only runs for @synchronized with tagged pointers, which is
// very rare.
362 | static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) { |
363 | typedef AddrHashMap<uptr, 5> Map; |
364 | static Map Addresses; |
365 | Map::Handle h(&Addresses, addr); |
366 | if (h.created()) { |
367 | ThreadIgnoreBegin(thr, pc); |
368 | *h = (uptr) user_alloc(thr, pc, /*size=*/1); |
369 | ThreadIgnoreEnd(thr); |
370 | } |
371 | return *h; |
372 | } |
373 | |
374 | // Returns an address on which we can synchronize given an Obj-C object pointer. |
375 | // For normal object pointers, this is just the address of the object in memory. |
376 | // Tagged pointers are not backed by an actual memory allocation, so we need to |
377 | // synthesize a valid address. |
378 | static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) { |
379 | if (IsTaggedObjCPointer(obj)) |
380 | return GetOrCreateSyncAddress((uptr)obj, thr, pc); |
381 | return (uptr)obj; |
382 | } |
383 | |
384 | TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) { |
385 | SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj); |
386 | if (!obj) return REAL(objc_sync_enter)(obj); |
387 | uptr addr = SyncAddressForObjCObject(obj, thr, pc); |
388 | MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant); |
389 | int result = REAL(objc_sync_enter)(obj); |
390 | CHECK_EQ(result, OBJC_SYNC_SUCCESS); |
391 | MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant); |
392 | return result; |
393 | } |
394 | |
395 | TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) { |
396 | SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj); |
397 | if (!obj) return REAL(objc_sync_exit)(obj); |
398 | uptr addr = SyncAddressForObjCObject(obj, thr, pc); |
399 | MutexUnlock(thr, pc, addr); |
400 | int result = REAL(objc_sync_exit)(obj); |
401 | if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr); |
402 | return result; |
403 | } |
404 | |
405 | TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) { |
406 | { |
407 | SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp); |
408 | } |
  // Because of swapcontext() semantics (the saved context may be resumed
  // later, so the call effectively returns twice), we have no option but to
  // replicate its implementation here.
411 | if (!oucp || !ucp) { |
412 | errno = EINVAL; |
413 | return -1; |
414 | } |
415 | ThreadState *thr = cur_thread(); |
416 | const int UCF_SWAPPED = 0x80000000; |
417 | oucp->uc_onstack &= ~UCF_SWAPPED; |
418 | thr->ignore_interceptors++; |
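  // getcontext() effectively returns twice: the first time through, the flag
  // is still clear, so we mark the saved context as swapped and switch away
  // via setcontext(ucp); when something later resumes oucp, control re-emerges
  // from getcontext() with UCF_SWAPPED already set and we simply return.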
419 | int ret = getcontext(oucp); |
420 | if (!(oucp->uc_onstack & UCF_SWAPPED)) { |
421 | thr->ignore_interceptors--; |
422 | if (!ret) { |
423 | oucp->uc_onstack |= UCF_SWAPPED; |
424 | ret = setcontext(ucp); |
425 | } |
426 | } |
427 | return ret; |
428 | } |
429 | |
430 | // On macOS, libc++ is always linked dynamically, so intercepting works the |
431 | // usual way. |
432 | #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR |
433 | |
434 | namespace { |
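// Mirrors the in-memory layout of libc++'s __shared_weak_count: two atomic
// reference counts followed by a vptr. The _unused_* entries pad the vtable
// so that on_zero_shared() and on_zero_shared_weak() line up with the real
// virtual functions at the offsets their names indicate. This is a layout
// assumption about libc++.dylib, not a public ABI guarantee.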
435 | struct fake_shared_weak_count { |
436 | volatile a64 shared_owners; |
437 | volatile a64 shared_weak_owners; |
438 | virtual void _unused_0x0() = 0; |
439 | virtual void _unused_0x8() = 0; |
440 | virtual void on_zero_shared() = 0; |
441 | virtual void _unused_0x18() = 0; |
442 | virtual void on_zero_shared_weak() = 0; |
443 | virtual ~fake_shared_weak_count() = 0; // suppress -Wnon-virtual-dtor |
444 | }; |
445 | } // namespace |
446 | |
447 | // The following code adds libc++ interceptors for: |
448 | // void __shared_weak_count::__release_shared() _NOEXCEPT; |
449 | // bool __shared_count::__release_shared() _NOEXCEPT; |
450 | // Shared and weak pointers in C++ maintain reference counts via atomics in |
451 | // libc++.dylib, which are TSan-invisible, and this leads to false positives in |
// destructor code. These interceptors re-implement the functions entirely so
// that the mo_acq_rel semantics of the atomic decrement are visible to TSan.
454 | // |
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because that would create a race
// between the synchronization and the destruction of the object. Calling both
// under a lock does not work either, because the destructor can invoke this
// interceptor again (possibly in a different thread, so recursive locks don't
// help).
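//
// Hypothetical example of a false positive these interceptors suppress
// (illustrative code, not from the original):
//
//   auto p = std::make_shared<int>(0);
//   std::thread t([q = p]() mutable { *q = 1; q.reset(); });
//   p.reset();  // may run the deleter; without the interceptor TSan cannot
//   t.join();   // see that "*q = 1" happens-before the deallocation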
460 | |
461 | STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv, |
462 | fake_shared_weak_count *o) { |
463 | if (!flags()->shared_ptr_interceptor) |
464 | return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o); |
465 | |
466 | SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv, |
467 | o); |
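  // Note: libc++ biases the owner count by one (a sole owner stores 0), so
  // observing 0 from the fetch-and-decrement (which returns the old value)
  // means this was the last shared reference.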
468 | if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) { |
469 | Acquire(thr, pc, (uptr)&o->shared_owners); |
470 | o->on_zero_shared(); |
471 | if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) == |
472 | 0) { |
473 | Acquire(thr, pc, (uptr)&o->shared_weak_owners); |
474 | o->on_zero_shared_weak(); |
475 | } |
476 | } |
477 | } |
478 | |
479 | STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv, |
480 | fake_shared_weak_count *o) { |
481 | if (!flags()->shared_ptr_interceptor) |
482 | return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o); |
483 | |
484 | SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o); |
485 | if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) { |
486 | Acquire(thr, pc, (uptr)&o->shared_owners); |
487 | o->on_zero_shared(); |
488 | return true; |
489 | } |
490 | return false; |
491 | } |
492 | |
493 | namespace { |
494 | struct call_once_callback_args { |
495 | void (*orig_func)(void *arg); |
496 | void *orig_arg; |
497 | void *flag; |
498 | }; |
499 | |
500 | void call_once_callback_wrapper(void *arg) { |
501 | call_once_callback_args *new_args = (call_once_callback_args *)arg; |
502 | new_args->orig_func(new_args->orig_arg); |
503 | __tsan_release(new_args->flag); |
504 | } |
505 | } // namespace |
506 | |
507 | // This adds a libc++ interceptor for: |
508 | // void __call_once(volatile unsigned long&, void*, void(*)(void*)); |
509 | // C++11 call_once is implemented via an internal function __call_once which is |
510 | // inside libc++.dylib, and the atomic release store inside it is thus |
511 | // TSan-invisible. To avoid false positives, this interceptor wraps the callback |
512 | // function and performs an explicit Release after the user code has run. |
513 | STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag, |
514 | void *arg, void (*func)(void *arg)) { |
515 | call_once_callback_args new_args = {func, arg, flag}; |
516 | REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args, |
517 | call_once_callback_wrapper); |
518 | } |
519 | |
520 | } // namespace __tsan |
521 | |
522 | #endif // SANITIZER_APPLE |
523 | |