//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H

#include "src/__support/CPP/type_traits/has_unique_object_representations.h"
#include "src/__support/macros/attributes.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace LIBC_NAMESPACE_DECL {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

// These are a clang extension; see the clang documentation for more
// information:
// https://clang.llvm.org/docs/LanguageExtensions.html#scoped-atomic-builtins.
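// On targets that do not provide the __MEMORY_SCOPE_* macros (typically CPU
// builds) both enumerators collapse to the same value below, so the scope
// argument is effectively a no-op; on GPU-like targets DEVICE only orders
// against threads on the same device while SYSTEM orders against every thread
// in the system.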
enum class MemoryScope : int {
#if defined(__MEMORY_SCOPE_SYSTEM) && defined(__MEMORY_SCOPE_DEVICE)
  SYSTEM = __MEMORY_SCOPE_SYSTEM,
  DEVICE = __MEMORY_SCOPE_DEVICE,
#else
  SYSTEM = 0,
  DEVICE = 0,
#endif
};

namespace impl {
LIBC_INLINE constexpr int order(MemoryOrder mem_ord) {
  return static_cast<int>(mem_ord);
}

LIBC_INLINE constexpr int scope(MemoryScope mem_scope) {
  return static_cast<int>(mem_scope);
}

template <class T> LIBC_INLINE T *addressof(T &ref) {
  return __builtin_addressof(ref);
}

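// Pick a failure ordering that is valid for a compare-exchange given its
// success ordering: the failure ordering may not include a release component,
// so RELEASE degrades to RELAXED and ACQ_REL degrades to ACQUIRE.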
LIBC_INLINE constexpr int infer_failure_order(MemoryOrder mem_ord) {
  if (mem_ord == MemoryOrder::RELEASE)
    return order(MemoryOrder::RELAXED);
  if (mem_ord == MemoryOrder::ACQ_REL)
    return order(MemoryOrder::ACQUIRE);
  return order(mem_ord);
}
} // namespace impl

template <typename T> struct Atomic {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "atomic<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "atomic<T> in libc only supports types whose values have "
                "unique object representations.");

private:
  // type conversion helper to avoid long c++ style casts

  // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to
  // at least their size to be potentially used lock-free.
  LIBC_INLINE_VAR static constexpr size_t MIN_ALIGNMENT =
      (sizeof(T) & (sizeof(T) - 1)) || (sizeof(T) > 16) ? 0 : sizeof(T);

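  // Note: (sizeof(T) & (sizeof(T) - 1)) is non-zero exactly when sizeof(T) is
  // not a power of two. For example, a hypothetical 12-byte struct would give
  // MIN_ALIGNMENT == 0, so ALIGNMENT below simply falls back to alignof(T).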
  LIBC_INLINE_VAR static constexpr size_t ALIGNMENT =
      alignof(T) > MIN_ALIGNMENT ? alignof(T) : MIN_ALIGNMENT;

public:
  using value_type = T;

  // The internal value is kept public so that its address can be taken. This
  // is useful in places like the Linux futex operations, where we need
  // pointers to the memory of the atomic values. Load and store operations
  // should nevertheless be performed through the atomic methods below.
  alignas(ALIGNMENT) value_type val;

  LIBC_INLINE constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}

  LIBC_INLINE Atomic(const Atomic &) = delete;
  LIBC_INLINE Atomic &operator=(const Atomic &) = delete;

  // Atomic load.
  LIBC_INLINE operator T() { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(impl::addressof(val), impl::addressof(res),
                         impl::order(mem_ord), impl::scope(mem_scope));
#else
    __atomic_load(impl::addressof(val), impl::addressof(res),
                  impl::order(mem_ord));
#endif
    return res;
  }

  // Atomic store.
  LIBC_INLINE T operator=(T rhs) {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(impl::addressof(val), impl::addressof(rhs),
                          impl::order(mem_ord), impl::scope(mem_scope));
#else
    __atomic_store(impl::addressof(val), impl::addressof(rhs),
                   impl::order(mem_ord));
#endif
  }

  // Atomic compare exchange
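  // These follow the semantics of the __atomic_compare_exchange builtin (and
  // std::atomic): on failure, `expected` is overwritten with the value
  // currently stored in the atomic, so a retry loop can reuse it directly.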
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), false, impl::order(mem_ord),
        impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (separate success and failure memory orders)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), false, impl::order(success_order),
        impl::order(failure_order));
  }

  // Atomic compare exchange (weak version)
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), true, impl::order(mem_ord),
        impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (weak version with separate success and failure
  // memory orders)
  LIBC_INLINE bool compare_exchange_weak(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    return __atomic_compare_exchange(
        impl::addressof(val), impl::addressof(expected),
        impl::addressof(desired), true, impl::order(success_order),
        impl::order(failure_order));
  }

  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(impl::addressof(val), impl::addressof(desired),
                             impl::addressof(ret), impl::order(mem_ord),
                             impl::scope(mem_scope));
#else
    __atomic_exchange(impl::addressof(val), impl::addressof(desired),
                      impl::addressof(ret), impl::order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T
  fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(impl::addressof(val), increment,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_add(impl::addressof(val), increment,
                              impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(impl::addressof(val), mask,
                                    impl::order(mem_ord),
                                    impl::scope(mem_scope));
#else
    return __atomic_fetch_or(impl::addressof(val), mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(impl::addressof(val), mask,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_and(impl::addressof(val), mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(impl::addressof(val), decrement,
                                     impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_sub(impl::addressof(val), decrement,
                              impl::order(mem_ord));
#endif
  }

  // Set the value without using an atomic operation. This is useful
  // in initializing atomic values without a constructor.
  LIBC_INLINE void set(T rhs) { val = rhs; }
};

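// Illustrative usage sketch (the names `counter`, `flag`, and `try_claim`
// below are hypothetical and not part of this header; only the member calls
// mirror the interface above):
//
//   cpp::Atomic<int> counter(0);
//   counter.fetch_add(1, cpp::MemoryOrder::RELAXED);
//   int snapshot = counter.load(cpp::MemoryOrder::ACQUIRE);
//
//   // A classic CAS retry loop; on failure `expected` is refreshed with the
//   // current value, so a spurious weak failure simply retries.
//   LIBC_INLINE bool try_claim(cpp::Atomic<int> &flag) {
//     int expected = 0;
//     while (!flag.compare_exchange_weak(expected, 1,
//                                        cpp::MemoryOrder::ACQ_REL,
//                                        cpp::MemoryOrder::RELAXED)) {
//       if (expected != 0)
//         return false; // Someone else already claimed the flag.
//       // Spurious failure: `expected` is still 0, try again.
//     }
//     return true;
//   }
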
template <typename T> struct AtomicRef {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "AtomicRef<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "AtomicRef<T> only supports types with unique object "
                "representations.");

private:
  T *ptr;

public:
  // Constructor from T reference
  LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}

  // Non-standard implicit conversion from T*.
  LIBC_INLINE constexpr AtomicRef(T *obj) : ptr(obj) {}

  LIBC_INLINE AtomicRef(const AtomicRef &) = default;
  LIBC_INLINE AtomicRef &operator=(const AtomicRef &) = default;

  // Atomic load
  LIBC_INLINE operator T() const { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(ptr, &res, impl::order(mem_ord),
                         impl::scope(mem_scope));
#else
    __atomic_load(ptr, &res, impl::order(mem_ord));
#endif
    return res;
  }

  // Atomic store
  LIBC_INLINE T operator=(T rhs) const {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(ptr, &rhs, impl::order(mem_ord),
                          impl::scope(mem_scope));
#else
    __atomic_store(ptr, &rhs, impl::order(mem_ord));
#endif
  }

  // Atomic compare exchange (strong)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     impl::order(mem_ord),
                                     impl::infer_failure_order(mem_ord));
  }

  // Atomic compare exchange (strong, separate success/failure memory orders)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     impl::order(success_order),
                                     impl::order(failure_order));
  }

  // Atomic exchange
  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord),
                             impl::scope(mem_scope));
#else
    __atomic_exchange(ptr, &desired, &ret, impl::order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T fetch_add(
      T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(ptr, increment, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_add(ptr, increment, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(ptr, mask, impl::order(mem_ord),
                                    impl::scope(mem_scope));
#else
    return __atomic_fetch_or(ptr, mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_and(
      T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(ptr, mask, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_and(ptr, mask, impl::order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_sub(
      T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(ptr, decrement, impl::order(mem_ord),
                                     impl::scope(mem_scope));
#else
    return __atomic_fetch_sub(ptr, decrement, impl::order(mem_ord));
#endif
  }
};

// Permit CTAD when generating an atomic reference.
template <typename T> AtomicRef(T &) -> AtomicRef<T>;

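// For example (illustrative only; `futex_word` is a hypothetical variable),
// the deduction guide lets callers omit the template argument:
//   uint32_t futex_word = 0;
//   cpp::AtomicRef ref(futex_word); // deduced as cpp::AtomicRef<uint32_t>
//   ref.store(1, cpp::MemoryOrder::RELEASE);
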
// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(
    MemoryOrder mem_ord,
    [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
#if __has_builtin(__scoped_atomic_thread_fence)
  __scoped_atomic_thread_fence(static_cast<int>(mem_ord),
                               static_cast<int>(mem_scope));
#else
  __atomic_thread_fence(static_cast<int>(mem_ord));
#endif
}

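// For example (illustrative; `data` and `ready` are hypothetical), a release
// fence upgrades a subsequent relaxed store into a publish operation:
//   data = 42;                                            // plain store
//   cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
//   ready.store(1, cpp::MemoryOrder::RELAXED);
// The reading side pairs a relaxed load of `ready` with an acquire fence
// before touching `data`.
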
// Establishes memory synchronization ordering of non-atomic and relaxed atomic
// accesses, as instructed by order, between a thread and a signal handler
// executed on the same thread. This is equivalent to atomic_thread_fence,
// except no instructions for memory ordering are issued. Only reordering of
// the instructions by the compiler is suppressed as order instructs.
LIBC_INLINE void atomic_signal_fence([[maybe_unused]] MemoryOrder mem_ord) {
#if __has_builtin(__atomic_signal_fence)
  __atomic_signal_fence(static_cast<int>(mem_ord));
#else
  // If the builtin is not available, use an empty asm statement as a full
  // compiler barrier.
  asm volatile("" ::: "memory");
#endif
}
} // namespace cpp
} // namespace LIBC_NAMESPACE_DECL

#endif // LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H