//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background, see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
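//
// As a rough illustration (not part of this file's contract): when building
// with -fsanitize=thread, the compiler's TSan instrumentation pass lowers an
// atomic operation such as
//   atomic_load_explicit(&x, memory_order_acquire)
// into a call to the corresponding runtime entry point defined below, e.g.
//   __tsan_atomic32_load(&x, __ATOMIC_ACQUIRE)
// so that the runtime can both perform the access and model its
// synchronization effects.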

#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume || mo == mo_acquire ||
         mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire || mo == mo_acq_rel ||
         mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

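// The func_* helpers below perform the actual memory update. They are built
// on the __sync_* builtins (assumed here to provide at least the requested
// ordering on the targets TSan supports); the C++11 memory-order semantics
// visible to the race detector are modeled separately by the clock operations
// in AtomicRMW/OpLoad/OpStore below. The 128-bit variants further down fall
// back to a global spin mutex when the target lacks 16-byte CAS.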
template <typename T>
T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template <typename T>
T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template <typename T>
T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template <typename T>
T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template <typename T>
T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template <typename T>
T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template <typename T>
T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template <typename T>
T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO && \
    __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
    case mo_relaxed:
      return memory_order_relaxed;
    case mo_consume:
      return memory_order_consume;
    case mo_acquire:
      return memory_order_acquire;
    case mo_release:
      return memory_order_release;
    case mo_acq_rel:
      return memory_order_acq_rel;
    case mo_seq_cst:
      return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

namespace {

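// Shared implementation for all read-modify-write operations: the access is
// reported to the race detector, the relaxed case takes a fast path that only
// performs the hardware operation, and for stronger orders the thread's
// vector clock is released to and/or acquired from the sync object associated
// with the atomic's address, under that object's mutex.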
template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}

struct OpLoad {
  template <typename T>
  static T NoTsanAtomic(morder mo, const volatile T *a) {
    return atomic_load(to_atomic(a), to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static a128 NoTsanAtomic(morder mo, const volatile a128 *a) {
    SpinMutexLock lock(&mutex128);
    return *a;
  }
#endif

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
    DCHECK(IsLoadOrder(mo));
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    if (!IsAcquireOrder(mo)) {
      MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                   kAccessRead | kAccessAtomic);
      return NoTsanAtomic(mo, a);
    }
    // Don't create sync object if it does not exist yet. For example, an
    // atomic pointer is initialized to nullptr and then periodically
    // acquire-loaded.
    T v = NoTsanAtomic(mo, a);
    SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
    if (s) {
      SlotLocker locker(thr);
      ReadLock lock(&s->mtx);
      thr->clock.Acquire(s->clock);
      // Re-read under sync mutex because we need a consistent snapshot
      // of the value and the clock we acquire.
      v = NoTsanAtomic(mo, a);
    }
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return v;
  }
};

struct OpStore {
  template <typename T>
  static void NoTsanAtomic(morder mo, volatile T *a, T v) {
    atomic_store(to_atomic(a), v, to_mo(mo));
  }

#if __TSAN_HAS_INT128 && !SANITIZER_GO
  static void NoTsanAtomic(morder mo, volatile a128 *a, a128 v) {
    SpinMutexLock lock(&mutex128);
    *a = v;
  }
#endif

  template <typename T>
  static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    DCHECK(IsStoreOrder(mo));
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    // This fast-path is critical for performance.
    // Assume the access is atomic.
    // Strictly speaking, even a relaxed store cuts off the release sequence,
    // so we must reset the clock.
    if (!IsReleaseOrder(mo)) {
      NoTsanAtomic(mo, a, v);
      return;
    }
    SlotLocker locker(thr);
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      Lock lock(&s->mtx);
      thr->clock.ReleaseStore(&s->clock);
      NoTsanAtomic(mo, a, v);
    }
    IncrementEpoch(thr);
  }
};

struct OpExchange {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xchg(a, v);
  }
  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
  }
};

struct OpFetchAdd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_add(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
  }
};

struct OpFetchSub {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_sub(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
  }
};

struct OpFetchAnd {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_and(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
  }
};

struct OpFetchOr {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_or(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
  }
};

struct OpFetchXor {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_xor(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
  }
};

struct OpFetchNand {
  template <typename T>
  static T NoTsanAtomic(morder mo, volatile T *a, T v) {
    return func_nand(a, v);
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
  }
};

struct OpCAS {
  template <typename T>
  static bool NoTsanAtomic(morder mo, morder fmo, volatile T *a, T *c, T v) {
    return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
  }

#if __TSAN_HAS_INT128
  static bool NoTsanAtomic(morder mo, morder fmo, volatile a128 *a, a128 *c,
                           a128 v) {
    a128 old = *c;
    a128 cur = func_cas(a, old, v);
    if (cur == old)
      return true;
    *c = cur;
    return false;
  }
#endif

  template <typename T>
  static T NoTsanAtomic(morder mo, morder fmo, volatile T *a, T c, T v) {
    NoTsanAtomic(mo, fmo, a, &c, v);
    return c;
  }

  template <typename T>
  static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                     volatile T *a, T *c, T v) {
    // 31.7.2.18: "The failure argument shall not be memory_order_release
    // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
    // (mo_relaxed) when those are used.
    DCHECK(IsLoadOrder(fmo));

    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
      T cc = *c;
      T pr = func_cas(a, cc, v);
      if (pr == cc)
        return true;
      *c = pr;
      return false;
    }
    SlotLocker locker(thr);
    bool release = IsReleaseOrder(mo);
    bool success;
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      RWLock lock(&s->mtx, release);
      T cc = *c;
      T pr = func_cas(a, cc, v);
      success = pr == cc;
      if (!success) {
        *c = pr;
        // On failure, use the failure memory order for the clock operations.
        mo = fmo;
      }
      if (success && IsAcqRelOrder(mo))
        thr->clock.ReleaseAcquire(&s->clock);
      else if (success && IsReleaseOrder(mo))
        thr->clock.Release(&s->clock);
      else if (IsAcquireOrder(mo))
        thr->clock.Acquire(s->clock);
    }
    if (success && release)
      IncrementEpoch(thr);
    return success;
  }

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                  volatile T *a, T c, T v) {
    Atomic(thr, pc, mo, fmo, a, &c, v);
    return c;
  }
};

#if !SANITIZER_GO
struct OpFence {
  static void NoTsanAtomic(morder mo) { __sync_synchronize(); }

  static void Atomic(ThreadState *thr, uptr pc, morder mo) {
    // FIXME(dvyukov): not implemented.
    __sync_synchronize();
  }
};
#endif

}  // namespace

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  return flags()->force_seq_cst_atomics ? mo_seq_cst : mo;
}

static morder to_morder(int mo) {
  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
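  //
  // For example (illustrative, assuming the usual GCC/clang __ATOMIC_* values
  // where __ATOMIC_ACQUIRE == 2): __ATOMIC_HLE_ACQUIRE | __ATOMIC_ACQUIRE ==
  // (1 << 16) | 2, and truncating it to u8 below yields 2, i.e. mo_acquire.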
  morder res = static_cast<morder>(static_cast<u8>(mo));
  DCHECK_LE(res, mo_seq_cst);
  return res;
}

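// Common front end for all C/C++ entry points below: it fetches the current
// thread state, processes pending signals, and then dispatches either to the
// plain NoTsanAtomic variant (when synchronization modeling is ignored for
// this thread) or to the fully instrumented Atomic variant of the operation.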
template <class Op, class... Types>
ALWAYS_INLINE auto AtomicImpl(morder mo, Types... args) {
  ThreadState *const thr = cur_thread();
  ProcessPendingSignals(thr);
  if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
    return Op::NoTsanAtomic(mo, args...);
  return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, int mo) {
  return AtomicImpl<OpLoad>(to_morder(mo), a);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpStore>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpExchange>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAdd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchSub>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchAnd>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchOr>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchXor>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, int mo) {
  return AtomicImpl<OpFetchNand>(to_morder(mo), a, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, int mo,
                                            int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                             int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                              int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, int mo,
                                       int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, int mo,
                                         int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}

#  if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           int mo, int fmo) {
  return AtomicImpl<OpCAS>(to_morder(mo), to_morder(fmo), a, c, v);
}
#  endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(int mo) {
  return AtomicImpl<OpFence>(to_morder(mo));
}

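// Note: a signal fence only constrains reordering within a single thread
// (with respect to a signal handler running on that thread), so there is no
// cross-thread synchronization for the race detector to model here; the empty
// body below is intentional.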
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(int mo) {}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

template <class Op, class... Types>
void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    (void)Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    (void)Op::Atomic(thr, pc, args...);
    FuncExit(thr);
  }
}

template <class Op, class... Types>
auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
  if (thr->ignore_sync) {
    return Op::NoTsanAtomic(args...);
  } else {
    FuncEntry(thr, cpc);
    auto ret = Op::Atomic(thr, pc, args...);
    FuncExit(thr);
    return ret;
  }
}

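// Each Go entry point receives a single packed argument buffer `a`: the
// address of the atomic variable is stored at offset 0, operands follow
// starting at offset 8, and the result is written back into the buffer after
// the operands, at the fixed offsets used below.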
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a32 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a64 **)a);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
                                            *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a32 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a32 **)a, *(a32 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  *(a64 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
                                             *(a64 **)a, *(a64 *)(a + 8));
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a32 cmp = *(a32 *)(a + 8);
  a32 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a,
                               cmp, *(a32 *)(a + 12));
  *(bool *)(a + 16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
                                         u8 *a) {
  a64 cmp = *(a64 *)(a + 8);
  a64 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a,
                               cmp, *(a64 *)(a + 16));
  *(bool *)(a + 24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO
