//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/macros/attributes.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a clang extension; see the clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};
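
// Illustrative sketch (not part of this header): when the scoped builtins are
// available, the scope is passed as a trailing argument, e.g.
//   __scoped_atomic_load_n(ptr, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_DEVICE);
// On targets without the extension both scopes collapse to 0 above and the
// methods below fall back to the plain __atomic_* builtins.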

template <typename T> struct Atomic {
  // For now, we restrict this to arithmetic types only.
  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");

private:
  // The value stored should be appropriately aligned so that
  // hardware instructions used to perform atomic operations work
  // correctly.
  static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                          : alignof(T);
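  // For example, on some 32-bit targets a 64-bit integer type may have a
  // default alignment smaller than its size; rounding the alignment up to
  // sizeof(T), as done above, keeps the value suitably aligned for the
  // hardware atomic instructions.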

public:
  using value_type = T;

  // We keep the internal value public so that it is addressable.
  // This is useful in places like the Linux futex operations where
  // we need pointers to the memory of the atomic values. However, load and
  // store operations should be performed using the atomic methods.
  alignas(ALIGNMENT) value_type val;

  constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  constexpr Atomic(value_type v) : val(v) {}

  Atomic(const Atomic &) = delete;
  Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }

  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_load_n)
    return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_load_n(&val, int(mem_ord));
#endif
  }

  // Atomic store.
  T operator=(T rhs) {
    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
    return rhs;
  }

  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store_n)
    __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
#else
    __atomic_store_n(&val, rhs, int(mem_ord));
#endif
  }

  // Atomic compare exchange
  bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       int(mem_ord), int(mem_ord));
  }
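
  // Usage sketch (illustrative only; 'a' is a hypothetical Atomic<T>):
  // compare_exchange_strong writes the current value back into 'expected' on
  // failure, so a typical read-modify-write loop is:
  //   T old = a.load(MemoryOrder::RELAXED);
  //   while (!a.compare_exchange_strong(old, old + 1))
  //     ;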

  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_exchange_n)
    return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
                                      (int)(mem_scope));
#else
    return __atomic_exchange_n(&val, desired, int(mem_ord));
#endif
  }

  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_add(&val, increment, int(mem_ord));
#endif
  }

  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
#else
    return __atomic_fetch_or(&val, mask, int(mem_ord));
#endif
  }

  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_and(&val, mask, int(mem_ord));
#endif
  }

  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
                                     (int)(mem_scope));
#else
    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
#endif
  }

  // Set the value without using an atomic operation. This is useful for
  // initializing atomic values without a constructor.
  void set(T rhs) { val = rhs; }
};
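
// Example usage from within LIBC_NAMESPACE (illustrative only; 'counter' is a
// hypothetical object):
//   cpp::Atomic<int> counter(0);
//   counter.fetch_add(1, cpp::MemoryOrder::RELAXED);
//   int snapshot = counter.load();
//   counter.store(0);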

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence([[maybe_unused]] MemoryOrder mem_ord) {
// The NVPTX backend currently does not support atomic thread fences, so we use
// a full system fence instead.
#ifdef LIBC_TARGET_ARCH_IS_NVPTX
  __nvvm_membar_sys();
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}
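
// Usage sketch (illustrative only; 'data' and 'ready' are hypothetical
// atomics): a release fence before publishing a flag pairs with an acquire
// fence after reading that flag on another thread:
//   data.store(42, MemoryOrder::RELAXED);
//   atomic_thread_fence(MemoryOrder::RELEASE);
//   ready.store(1, MemoryOrder::RELAXED);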

// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by mem_ord, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as mem_ord instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use asm as a full compiler barrier.
  asm volatile("" ::: "memory");
#endif
}
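
// Usage sketch (illustrative only; 'data', 'flag', and compute() are
// hypothetical):
//   data = compute();                          // plain write
//   atomic_signal_fence(MemoryOrder::RELEASE); // compiler barrier only
//   flag.store(1, MemoryOrder::RELAXED);
// A signal handler running on the same thread that loads 'flag' (followed by
// an acquire signal fence) will then observe the write to 'data'.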

} // namespace cpp
} // namespace LIBC_NAMESPACE

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H