1 | //! Atomic types |
2 | //! |
3 | //! Atomic types provide primitive shared-memory communication between |
4 | //! threads, and are the building blocks of other concurrent |
5 | //! types. |
6 | //! |
7 | //! This module defines atomic versions of a select number of primitive |
8 | //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`], |
9 | //! [`AtomicI8`], [`AtomicU16`], etc. |
10 | //! Atomic types present operations that, when used correctly, synchronize |
11 | //! updates between threads. |
12 | //! |
13 | //! Atomic variables are safe to share between threads (they implement [`Sync`]) |
14 | //! but they do not themselves provide the mechanism for sharing and follow the |
15 | //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust. |
16 | //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an |
17 | //! atomically-reference-counted shared pointer). |
18 | //! |
19 | //! [arc]: ../../../std/sync/struct.Arc.html |
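//!
//! For instance, a counter shared between threads via [`Arc`][arc] might look like this (a
//! minimal sketch; the names are illustrative):
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! let counter = Arc::new(AtomicUsize::new(0));
//! let handles: Vec<_> = (0..4)
//!     .map(|_| {
//!         // Each thread gets its own `Arc` handle to the same atomic.
//!         let counter = Arc::clone(&counter);
//!         thread::spawn(move || {
//!             counter.fetch_add(1, Ordering::Relaxed);
//!         })
//!     })
//!     .collect();
//! for handle in handles {
//!     handle.join().unwrap();
//! }
//! assert_eq!(counter.load(Ordering::Relaxed), 4);
//! ```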
20 | //! |
21 | //! Atomic types may be stored in static variables, initialized using |
22 | //! the constant initializers like [`AtomicBool::new`]. Atomic statics |
23 | //! are often used for lazy global initialization. |
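//!
//! A minimal sketch of such a static (the `INITIALIZED` name and the setup step are
//! illustrative):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! static INITIALIZED: AtomicBool = AtomicBool::new(false);
//!
//! fn init_once() {
//!     // Only the first caller to flip the flag performs the one-time setup.
//!     if INITIALIZED
//!         .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
//!         .is_ok()
//!     {
//!         // ... one-time initialization goes here ...
//!     }
//! }
//!
//! init_once();
//! ```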
24 | //! |
25 | //! ## Memory model for atomic accesses |
26 | //! |
27 | //! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules |
28 | //! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since |
29 | //! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work |
30 | //! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an |
31 | //! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++ |
32 | //! standard talks about "the value of an atomic object", this refers to the result of doing an |
33 | //! atomic load (via the operations provided in this module). A "modification of an atomic object" |
34 | //! refers to an atomic store. |
35 | //! |
36 | //! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the |
37 | //! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being |
38 | //! destroyed when the lifetime of the shared reference ends. The main difference is that Rust |
//! permits concurrent atomic and non-atomic reads to the same memory; those cause no issue in the
//! C++ memory model and are only forbidden in C++ because memory is partitioned into "atomic
41 | //! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object |
42 | //! into an atomic object). |
43 | //! |
44 | //! The most important aspect of this model is that *data races* are undefined behavior. A data race |
45 | //! is defined as conflicting non-synchronized accesses where at least one of the accesses is |
46 | //! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at |
47 | //! least one of them is a write. (A `compare_exchange` or `compare_exchange_weak` that does not |
48 | //! succeed is not considered a write.) They are *non-synchronized* if neither of them |
49 | //! *happens-before* the other, according to the happens-before order of the memory model. |
50 | //! |
//! The other possible cause of undefined behavior in the memory model is mixed-size accesses: Rust
52 | //! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially |
53 | //! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint, |
54 | //! access the exact same memory (including using the same access size), or both be reads. |
55 | //! |
56 | //! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the |
57 | //! happens-before order. These orderings behave the same as the corresponding [C++20 atomic |
58 | //! orderings][cpp_memory_order]. For more information, see the [nomicon]. |
59 | //! |
60 | //! [cpp]: https://en.cppreference.com/w/cpp/atomic |
61 | //! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races |
62 | //! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order |
63 | //! [nomicon]: ../../../nomicon/atomics.html |
64 | //! |
65 | //! ```rust,no_run undefined_behavior |
66 | //! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; |
67 | //! use std::mem::transmute; |
68 | //! use std::thread; |
69 | //! |
70 | //! let atomic = AtomicU16::new(0); |
71 | //! |
72 | //! thread::scope(|s| { |
73 | //! // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic. |
74 | //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store |
75 | //! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write |
76 | //! }); |
77 | //! |
78 | //! thread::scope(|s| { |
79 | //! // This is fine: the accesses do not conflict (as none of them performs any modification). |
80 | //! // In C++ this would be disallowed since creating an `atomic_ref` precludes |
81 | //! // further non-atomic accesses, but Rust does not have that limitation. |
82 | //! s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load |
83 | //! s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read |
84 | //! }); |
85 | //! |
86 | //! thread::scope(|s| { |
87 | //! // This is fine: `join` synchronizes the code in a way such that the atomic |
88 | //! // store happens-before the non-atomic write. |
89 | //! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store |
//! handle.join().expect("thread won't panic"); // synchronize
91 | //! s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write |
92 | //! }); |
93 | //! |
94 | //! thread::scope(|s| { |
95 | //! // This is UB: non-synchronized conflicting differently-sized atomic accesses. |
96 | //! s.spawn(|| atomic.store(1, Ordering::Relaxed)); |
97 | //! s.spawn(|| unsafe { |
98 | //! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic); |
99 | //! differently_sized.store(2, Ordering::Relaxed); |
100 | //! }); |
101 | //! }); |
102 | //! |
103 | //! thread::scope(|s| { |
104 | //! // This is fine: `join` synchronizes the code in a way such that |
105 | //! // the 1-byte store happens-before the 2-byte store. |
106 | //! let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); |
//! handle.join().expect("thread won't panic");
108 | //! s.spawn(|| unsafe { |
109 | //! let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic); |
110 | //! differently_sized.store(2, Ordering::Relaxed); |
111 | //! }); |
112 | //! }); |
113 | //! ``` |
114 | //! |
115 | //! # Portability |
116 | //! |
117 | //! All atomic types in this module are guaranteed to be [lock-free] if they're |
118 | //! available. This means they don't internally acquire a global mutex. Atomic |
119 | //! types and operations are not guaranteed to be wait-free. This means that |
120 | //! operations like `fetch_or` may be implemented with a compare-and-swap loop. |
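//!
//! As a sketch, such a fallback is conceptually equivalent to the following compare-and-swap
//! loop (the `emulated_fetch_or` helper is illustrative, not the actual implementation):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! fn emulated_fetch_or(a: &AtomicUsize, val: usize, order: Ordering) -> usize {
//!     let mut old = a.load(Ordering::Relaxed);
//!     loop {
//!         // Retry until no other thread has changed the value in the meantime.
//!         match a.compare_exchange_weak(old, old | val, order, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(prev) => old = prev,
//!         }
//!     }
//! }
//!
//! let a = AtomicUsize::new(0b01);
//! assert_eq!(emulated_fetch_or(&a, 0b10, Ordering::Relaxed), 0b01);
//! assert_eq!(a.load(Ordering::Relaxed), 0b11);
//! ```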
121 | //! |
122 | //! Atomic operations may be implemented at the instruction layer with |
123 | //! larger-size atomics. For example some platforms use 4-byte atomic |
//! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
126 | //! |
127 | //! The atomic types in this module might not be available on all platforms. The |
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
130 | //! |
131 | //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or |
132 | //! `AtomicI64` types. |
//! * ARM platforms like `armv5te` that aren't Linux targets only provide `load`
//! and `store` operations, and do not support Compare and Swap (CAS)
//! operations, such as `swap`, `fetch_add`, etc. Additionally, on Linux,
//! these CAS operations are implemented via [operating system support], which
//! may come with a performance penalty.
138 | //! * ARM targets with `thumbv6m` only provide `load` and `store` operations, |
139 | //! and do not support Compare and Swap (CAS) operations, such as `swap`, |
140 | //! `fetch_add`, etc. |
141 | //! |
142 | //! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt |
143 | //! |
144 | //! Note that future platforms may be added that also do not have support for |
145 | //! some atomic operations. Maximally portable code will want to be careful |
146 | //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are |
147 | //! generally the most portable, but even then they're not available everywhere. |
148 | //! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although |
149 | //! `core` does not. |
150 | //! |
151 | //! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally |
152 | //! compile based on the target's supported bit widths. It is a key-value |
153 | //! option set for each supported size, with values "8", "16", "32", "64", |
154 | //! "128", and "ptr" for pointer-sized atomics. |
155 | //! |
156 | //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm |
157 | //! |
158 | //! # Atomic accesses to read-only memory |
159 | //! |
160 | //! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting |
161 | //! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only |
162 | //! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since |
163 | //! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault |
164 | //! on read-only memory. |
165 | //! |
166 | //! For the purpose of this section, "read-only memory" is defined as memory that is read-only in |
167 | //! the underlying target, i.e., the pages are mapped with a read-only flag and any attempt to write |
168 | //! will cause a page fault. In particular, an `&u128` reference that points to memory that is |
169 | //! read-write mapped is *not* considered to point to "read-only memory". In Rust, almost all memory |
170 | //! is read-write; the only exceptions are memory created by `const` items or `static` items without |
171 | //! interior mutability, and memory that was specifically marked as read-only by the operating |
172 | //! system via platform-specific APIs. |
173 | //! |
174 | //! As an exception from the general rule stated above, "sufficiently small" atomic loads with |
175 | //! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not |
176 | //! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies |
177 | //! depending on the target: |
178 | //! |
179 | //! | `target_arch` | Size limit | |
180 | //! |---------------|---------| |
181 | //! | `x86`, `arm`, `mips`, `mips32r6`, `powerpc`, `riscv32`, `sparc`, `hexagon` | 4 bytes | |
182 | //! | `x86_64`, `aarch64`, `loongarch64`, `mips64`, `mips64r6`, `powerpc64`, `riscv64`, `sparc64`, `s390x` | 8 bytes | |
183 | //! |
//! Atomic loads that are larger than this limit, atomic loads with an ordering other than
//! `Relaxed`, and *all* atomic loads on targets not listed in the table might still work on
//! read-only memory under certain conditions, but that is not a stable guarantee and should not be
//! relied upon.
188 | //! |
189 | //! If you need to do an acquire load on read-only memory, you can do a relaxed load followed by an |
190 | //! acquire fence instead. |
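//!
//! A minimal sketch of that workaround (the `FLAG` static is illustrative):
//!
//! ```
//! use std::sync::atomic::{fence, AtomicU32, Ordering};
//!
//! static FLAG: AtomicU32 = AtomicU32::new(0);
//!
//! // The relaxed load stays within the read-only exception above, and the
//! // acquire fence upgrades it to have the effect of an acquire load.
//! let value = FLAG.load(Ordering::Relaxed);
//! fence(Ordering::Acquire);
//! assert_eq!(value, 0);
//! ```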
191 | //! |
192 | //! # Examples |
193 | //! |
194 | //! A simple spinlock: |
195 | //! |
196 | //! ``` |
197 | //! use std::sync::Arc; |
198 | //! use std::sync::atomic::{AtomicUsize, Ordering}; |
199 | //! use std::{hint, thread}; |
200 | //! |
201 | //! fn main() { |
202 | //! let spinlock = Arc::new(AtomicUsize::new(1)); |
203 | //! |
204 | //! let spinlock_clone = Arc::clone(&spinlock); |
205 | //! |
206 | //! let thread = thread::spawn(move || { |
207 | //! spinlock_clone.store(0, Ordering::Release); |
208 | //! }); |
209 | //! |
210 | //! // Wait for the other thread to release the lock |
211 | //! while spinlock.load(Ordering::Acquire) != 0 { |
212 | //! hint::spin_loop(); |
213 | //! } |
214 | //! |
215 | //! if let Err(panic) = thread.join() { |
216 | //! println!("Thread had an error: {panic:?}" ); |
217 | //! } |
218 | //! } |
219 | //! ``` |
220 | //! |
221 | //! Keep a global count of live threads: |
222 | //! |
223 | //! ``` |
224 | //! use std::sync::atomic::{AtomicUsize, Ordering}; |
225 | //! |
226 | //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0); |
227 | //! |
228 | //! // Note that Relaxed ordering doesn't synchronize anything |
229 | //! // except the global thread counter itself. |
230 | //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::Relaxed); |
//! // Note that this number may not be true at the moment of printing
//! // because some other thread may have changed the static value already.
//! println!("live threads: {}", old_thread_count + 1);
234 | //! ``` |
235 | |
#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
#![rustc_diagnostic_item = "atomic_mod"]
240 | // Clippy complains about the pattern of "safe function calling unsafe function taking pointers". |
241 | // This happens with AtomicPtr intrinsics but is fine, as the pointers clippy is concerned about |
242 | // are just normal values that get loaded/stored, but not dereferenced. |
#![allow(clippy::not_unsafe_ptr_arg_deref)]
244 | |
245 | use self::Ordering::*; |
246 | use crate::cell::UnsafeCell; |
247 | use crate::hint::spin_loop; |
248 | use crate::{fmt, intrinsics}; |
249 | |
250 | // Some architectures don't have byte-sized atomics, which results in LLVM |
251 | // emulating them using a LL/SC loop. However for AtomicBool we can take |
252 | // advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND |
253 | // instead, which LLVM can emulate using a larger atomic OR/AND operation. |
254 | // |
255 | // This list should only contain architectures which have word-sized atomic-or/ |
256 | // atomic-and instructions but don't natively support byte-sized atomics. |
#[cfg(target_has_atomic = "8")]
const EMULATE_ATOMIC_BOOL: bool =
cfg!(any(target_arch = "riscv32", target_arch = "riscv64", target_arch = "loongarch64"));
260 | |
261 | /// A boolean type which can be safely shared between threads. |
262 | /// |
263 | /// This type has the same size, alignment, and bit validity as a [`bool`]. |
264 | /// |
265 | /// **Note**: This type is only available on platforms that support atomic |
266 | /// loads and stores of `u8`. |
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
271 | pub struct AtomicBool { |
272 | v: UnsafeCell<u8>, |
273 | } |
274 | |
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
277 | impl Default for AtomicBool { |
278 | /// Creates an `AtomicBool` initialized to `false`. |
#[inline]
280 | fn default() -> Self { |
281 | Self::new(false) |
282 | } |
283 | } |
284 | |
285 | // Send is implicitly implemented for AtomicBool. |
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
288 | unsafe impl Sync for AtomicBool {} |
289 | |
290 | /// A raw pointer type which can be safely shared between threads. |
291 | /// |
292 | /// This type has the same size and bit validity as a `*mut T`. |
293 | /// |
294 | /// **Note**: This type is only available on platforms that support atomic |
295 | /// loads and stores of pointers. Its size depends on the target pointer's size. |
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicPtr"]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
302 | pub struct AtomicPtr<T> { |
303 | p: UnsafeCell<*mut T>, |
304 | } |
305 | |
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
308 | impl<T> Default for AtomicPtr<T> { |
309 | /// Creates a null `AtomicPtr<T>`. |
310 | fn default() -> AtomicPtr<T> { |
311 | AtomicPtr::new(crate::ptr::null_mut()) |
312 | } |
313 | } |
314 | |
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
320 | unsafe impl<T> Sync for AtomicPtr<T> {} |
321 | |
322 | /// Atomic memory orderings |
323 | /// |
324 | /// Memory orderings specify the way atomic operations synchronize memory. |
325 | /// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the |
326 | /// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`] |
327 | /// operations synchronize other memory while additionally preserving a total order of such |
328 | /// operations across all threads. |
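///
/// For instance, a release store paired with an acquire load transfers visibility of prior
/// writes (a minimal message-passing sketch; the statics are illustrative):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// static DATA: AtomicUsize = AtomicUsize::new(0);
/// static READY: AtomicBool = AtomicBool::new(false);
///
/// let producer = thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     // The release store makes the write to DATA visible to any thread
///     // that observes `true` with an acquire load.
///     READY.store(true, Ordering::Release);
/// });
/// while !READY.load(Ordering::Acquire) {
///     std::hint::spin_loop();
/// }
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// producer.join().unwrap();
/// ```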
329 | /// |
330 | /// Rust's memory orderings are [the same as those of |
331 | /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order). |
332 | /// |
333 | /// For more information see the [nomicon]. |
334 | /// |
335 | /// [nomicon]: ../../../nomicon/atomics.html |
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
#[non_exhaustive]
#[rustc_diagnostic_item = "Ordering"]
340 | pub enum Ordering { |
341 | /// No ordering constraints, only atomic operations. |
342 | /// |
343 | /// Corresponds to [`memory_order_relaxed`] in C++20. |
344 | /// |
345 | /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering |
#[stable(feature = "rust1", since = "1.0.0")]
347 | Relaxed, |
348 | /// When coupled with a store, all previous operations become ordered |
349 | /// before any load of this value with [`Acquire`] (or stronger) ordering. |
350 | /// In particular, all previous writes become visible to all threads |
351 | /// that perform an [`Acquire`] (or stronger) load of this value. |
352 | /// |
353 | /// Notice that using this ordering for an operation that combines loads |
354 | /// and stores leads to a [`Relaxed`] load operation! |
355 | /// |
356 | /// This ordering is only applicable for operations that can perform a store. |
357 | /// |
358 | /// Corresponds to [`memory_order_release`] in C++20. |
359 | /// |
360 | /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering |
#[stable(feature = "rust1", since = "1.0.0")]
362 | Release, |
363 | /// When coupled with a load, if the loaded value was written by a store operation with |
364 | /// [`Release`] (or stronger) ordering, then all subsequent operations |
365 | /// become ordered after that store. In particular, all subsequent loads will see data |
366 | /// written before the store. |
367 | /// |
368 | /// Notice that using this ordering for an operation that combines loads |
369 | /// and stores leads to a [`Relaxed`] store operation! |
370 | /// |
371 | /// This ordering is only applicable for operations that can perform a load. |
372 | /// |
373 | /// Corresponds to [`memory_order_acquire`] in C++20. |
374 | /// |
375 | /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering |
#[stable(feature = "rust1", since = "1.0.0")]
377 | Acquire, |
378 | /// Has the effects of both [`Acquire`] and [`Release`] together: |
379 | /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering. |
380 | /// |
381 | /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up |
382 | /// not performing any store and hence it has just [`Acquire`] ordering. However, |
383 | /// `AcqRel` will never perform [`Relaxed`] accesses. |
384 | /// |
385 | /// This ordering is only applicable for operations that combine both loads and stores. |
386 | /// |
387 | /// Corresponds to [`memory_order_acq_rel`] in C++20. |
388 | /// |
389 | /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering |
#[stable(feature = "rust1", since = "1.0.0")]
391 | AcqRel, |
392 | /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store |
393 | /// operations, respectively) with the additional guarantee that all threads see all |
394 | /// sequentially consistent operations in the same order. |
395 | /// |
396 | /// Corresponds to [`memory_order_seq_cst`] in C++20. |
397 | /// |
398 | /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering |
#[stable(feature = "rust1", since = "1.0.0")]
400 | SeqCst, |
401 | } |
402 | |
403 | /// An [`AtomicBool`] initialized to `false`. |
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
since = "1.34.0",
note = "the `new` function is now preferred",
suggestion = "AtomicBool::new(false)"
)]
411 | pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); |
412 | |
#[cfg(target_has_atomic_load_store = "8")]
414 | impl AtomicBool { |
415 | /// Creates a new `AtomicBool`. |
416 | /// |
417 | /// # Examples |
418 | /// |
419 | /// ``` |
420 | /// use std::sync::atomic::AtomicBool; |
421 | /// |
422 | /// let atomic_true = AtomicBool::new(true); |
423 | /// let atomic_false = AtomicBool::new(false); |
424 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
#[must_use]
429 | pub const fn new(v: bool) -> AtomicBool { |
430 | AtomicBool { v: UnsafeCell::new(v as u8) } |
431 | } |
432 | |
433 | /// Creates a new `AtomicBool` from a pointer. |
434 | /// |
435 | /// # Examples |
436 | /// |
437 | /// ``` |
438 | /// use std::sync::atomic::{self, AtomicBool}; |
439 | /// |
440 | /// // Get a pointer to an allocated value |
441 | /// let ptr: *mut bool = Box::into_raw(Box::new(false)); |
442 | /// |
443 | /// assert!(ptr.cast::<AtomicBool>().is_aligned()); |
444 | /// |
445 | /// { |
446 | /// // Create an atomic view of the allocated value |
447 | /// let atomic = unsafe { AtomicBool::from_ptr(ptr) }; |
448 | /// |
449 | /// // Use `atomic` for atomic operations, possibly share it with other threads |
450 | /// atomic.store(true, atomic::Ordering::Relaxed); |
451 | /// } |
452 | /// |
453 | /// // It's ok to non-atomically access the value behind `ptr`, |
454 | /// // since the reference to the atomic ended its lifetime in the block above |
455 | /// assert_eq!(unsafe { *ptr }, true); |
456 | /// |
457 | /// // Deallocate the value |
458 | /// unsafe { drop(Box::from_raw(ptr)) } |
459 | /// ``` |
460 | /// |
461 | /// # Safety |
462 | /// |
463 | /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that this is always true, since |
464 | /// `align_of::<AtomicBool>() == 1`). |
465 | /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. |
466 | /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not |
467 | /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, |
468 | /// without synchronization. |
469 | /// |
470 | /// [valid]: crate::ptr#safety |
471 | /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses |
#[inline]
#[stable(feature = "atomic_from_ptr", since = "1.75.0")]
#[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
475 | pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool { |
476 | // SAFETY: guaranteed by the caller |
477 | unsafe { &*ptr.cast() } |
478 | } |
479 | |
480 | /// Returns a mutable reference to the underlying [`bool`]. |
481 | /// |
482 | /// This is safe because the mutable reference guarantees that no other threads are |
483 | /// concurrently accessing the atomic data. |
484 | /// |
485 | /// # Examples |
486 | /// |
487 | /// ``` |
488 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
489 | /// |
490 | /// let mut some_bool = AtomicBool::new(true); |
491 | /// assert_eq!(*some_bool.get_mut(), true); |
492 | /// *some_bool.get_mut() = false; |
493 | /// assert_eq!(some_bool.load(Ordering::SeqCst), false); |
494 | /// ``` |
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
497 | pub fn get_mut(&mut self) -> &mut bool { |
498 | // SAFETY: the mutable reference guarantees unique ownership. |
499 | unsafe { &mut *(self.v.get() as *mut bool) } |
500 | } |
501 | |
502 | /// Gets atomic access to a `&mut bool`. |
503 | /// |
504 | /// # Examples |
505 | /// |
506 | /// ``` |
507 | /// #![feature(atomic_from_mut)] |
508 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
509 | /// |
510 | /// let mut some_bool = true; |
511 | /// let a = AtomicBool::from_mut(&mut some_bool); |
512 | /// a.store(false, Ordering::Relaxed); |
513 | /// assert_eq!(some_bool, false); |
514 | /// ``` |
#[inline]
#[cfg(target_has_atomic_equal_alignment = "8")]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
518 | pub fn from_mut(v: &mut bool) -> &mut Self { |
519 | // SAFETY: the mutable reference guarantees unique ownership, and |
520 | // alignment of both `bool` and `Self` is 1. |
521 | unsafe { &mut *(v as *mut bool as *mut Self) } |
522 | } |
523 | |
524 | /// Gets non-atomic access to a `&mut [AtomicBool]` slice. |
525 | /// |
526 | /// This is safe because the mutable reference guarantees that no other threads are |
527 | /// concurrently accessing the atomic data. |
528 | /// |
529 | /// # Examples |
530 | /// |
531 | /// ``` |
532 | /// #![feature(atomic_from_mut)] |
533 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
534 | /// |
535 | /// let mut some_bools = [const { AtomicBool::new(false) }; 10]; |
536 | /// |
537 | /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools); |
538 | /// assert_eq!(view, [false; 10]); |
539 | /// view[..5].copy_from_slice(&[true; 5]); |
540 | /// |
541 | /// std::thread::scope(|s| { |
542 | /// for t in &some_bools[..5] { |
543 | /// s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true)); |
544 | /// } |
545 | /// |
546 | /// for f in &some_bools[5..] { |
547 | /// s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false)); |
548 | /// } |
549 | /// }); |
550 | /// ``` |
#[inline]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
553 | pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] { |
554 | // SAFETY: the mutable reference guarantees unique ownership. |
555 | unsafe { &mut *(this as *mut [Self] as *mut [bool]) } |
556 | } |
557 | |
558 | /// Gets atomic access to a `&mut [bool]` slice. |
559 | /// |
560 | /// # Examples |
561 | /// |
562 | /// ``` |
563 | /// #![feature(atomic_from_mut)] |
564 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
565 | /// |
566 | /// let mut some_bools = [false; 10]; |
567 | /// let a = &*AtomicBool::from_mut_slice(&mut some_bools); |
568 | /// std::thread::scope(|s| { |
569 | /// for i in 0..a.len() { |
570 | /// s.spawn(move || a[i].store(true, Ordering::Relaxed)); |
571 | /// } |
572 | /// }); |
573 | /// assert_eq!(some_bools, [true; 10]); |
574 | /// ``` |
#[inline]
#[cfg(target_has_atomic_equal_alignment = "8")]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
578 | pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] { |
579 | // SAFETY: the mutable reference guarantees unique ownership, and |
580 | // alignment of both `bool` and `Self` is 1. |
581 | unsafe { &mut *(v as *mut [bool] as *mut [Self]) } |
582 | } |
583 | |
584 | /// Consumes the atomic and returns the contained value. |
585 | /// |
586 | /// This is safe because passing `self` by value guarantees that no other threads are |
587 | /// concurrently accessing the atomic data. |
588 | /// |
589 | /// # Examples |
590 | /// |
591 | /// ``` |
592 | /// use std::sync::atomic::AtomicBool; |
593 | /// |
594 | /// let some_bool = AtomicBool::new(true); |
595 | /// assert_eq!(some_bool.into_inner(), true); |
596 | /// ``` |
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
#[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
600 | pub const fn into_inner(self) -> bool { |
601 | self.v.into_inner() != 0 |
602 | } |
603 | |
604 | /// Loads a value from the bool. |
605 | /// |
606 | /// `load` takes an [`Ordering`] argument which describes the memory ordering |
607 | /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. |
608 | /// |
609 | /// # Panics |
610 | /// |
611 | /// Panics if `order` is [`Release`] or [`AcqRel`]. |
612 | /// |
613 | /// # Examples |
614 | /// |
615 | /// ``` |
616 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
617 | /// |
618 | /// let some_bool = AtomicBool::new(true); |
619 | /// |
620 | /// assert_eq!(some_bool.load(Ordering::Relaxed), true); |
621 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
625 | pub fn load(&self, order: Ordering) -> bool { |
626 | // SAFETY: any data races are prevented by atomic intrinsics and the raw |
627 | // pointer passed in is valid because we got it from a reference. |
628 | unsafe { atomic_load(self.v.get(), order) != 0 } |
629 | } |
630 | |
631 | /// Stores a value into the bool. |
632 | /// |
633 | /// `store` takes an [`Ordering`] argument which describes the memory ordering |
634 | /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. |
635 | /// |
636 | /// # Panics |
637 | /// |
638 | /// Panics if `order` is [`Acquire`] or [`AcqRel`]. |
639 | /// |
640 | /// # Examples |
641 | /// |
642 | /// ``` |
643 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
644 | /// |
645 | /// let some_bool = AtomicBool::new(true); |
646 | /// |
647 | /// some_bool.store(false, Ordering::Relaxed); |
648 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
649 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
653 | pub fn store(&self, val: bool, order: Ordering) { |
654 | // SAFETY: any data races are prevented by atomic intrinsics and the raw |
655 | // pointer passed in is valid because we got it from a reference. |
656 | unsafe { |
657 | atomic_store(self.v.get(), val as u8, order); |
658 | } |
659 | } |
660 | |
661 | /// Stores a value into the bool, returning the previous value. |
662 | /// |
663 | /// `swap` takes an [`Ordering`] argument which describes the memory ordering |
664 | /// of this operation. All ordering modes are possible. Note that using |
665 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
666 | /// using [`Release`] makes the load part [`Relaxed`]. |
667 | /// |
668 | /// **Note:** This method is only available on platforms that support atomic |
669 | /// operations on `u8`. |
670 | /// |
671 | /// # Examples |
672 | /// |
673 | /// ``` |
674 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
675 | /// |
676 | /// let some_bool = AtomicBool::new(true); |
677 | /// |
678 | /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true); |
679 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
680 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
685 | pub fn swap(&self, val: bool, order: Ordering) -> bool { |
686 | if EMULATE_ATOMIC_BOOL { |
687 | if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) } |
688 | } else { |
689 | // SAFETY: data races are prevented by atomic intrinsics. |
690 | unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } |
691 | } |
692 | } |
693 | |
694 | /// Stores a value into the [`bool`] if the current value is the same as the `current` value. |
695 | /// |
696 | /// The return value is always the previous value. If it is equal to `current`, then the value |
697 | /// was updated. |
698 | /// |
699 | /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory |
700 | /// ordering of this operation. Notice that even when using [`AcqRel`], the operation |
701 | /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. |
702 | /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it |
703 | /// happens, and using [`Release`] makes the load part [`Relaxed`]. |
704 | /// |
705 | /// **Note:** This method is only available on platforms that support atomic |
706 | /// operations on `u8`. |
707 | /// |
708 | /// # Migrating to `compare_exchange` and `compare_exchange_weak` |
709 | /// |
710 | /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for |
711 | /// memory orderings: |
712 | /// |
713 | /// Original | Success | Failure |
714 | /// -------- | ------- | ------- |
715 | /// Relaxed | Relaxed | Relaxed |
716 | /// Acquire | Acquire | Acquire |
717 | /// Release | Release | Relaxed |
718 | /// AcqRel | AcqRel | Acquire |
719 | /// SeqCst | SeqCst | SeqCst |
720 | /// |
721 | /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use |
722 | /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`, |
723 | /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err` |
724 | /// rather than to infer success vs failure based on the value that was read. |
725 | /// |
726 | /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead. |
727 | /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds, |
728 | /// which allows the compiler to generate better assembly code when the compare and swap |
729 | /// is used in a loop. |
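///
/// For example, migrating a `SeqCst` `compare_and_swap` while preserving its exact return
/// value might look like this (a sketch):
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let a = AtomicBool::new(false);
/// // Before: let prev = a.compare_and_swap(false, true, Ordering::SeqCst);
/// let prev = a
///     .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
///     .unwrap_or_else(|x| x);
/// assert_eq!(prev, false);
/// ```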
730 | /// |
731 | /// # Examples |
732 | /// |
733 | /// ``` |
734 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
735 | /// |
736 | /// let some_bool = AtomicBool::new(true); |
737 | /// |
738 | /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true); |
739 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
740 | /// |
741 | /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false); |
742 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
743 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
since = "1.50.0",
note = "Use `compare_exchange` or `compare_exchange_weak` instead"
)]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
752 | pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool { |
753 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
754 | Ok(x) => x, |
755 | Err(x) => x, |
756 | } |
757 | } |
758 | |
759 | /// Stores a value into the [`bool`] if the current value is the same as the `current` value. |
760 | /// |
761 | /// The return value is a result indicating whether the new value was written and containing |
762 | /// the previous value. On success this value is guaranteed to be equal to `current`. |
763 | /// |
764 | /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory |
765 | /// ordering of this operation. `success` describes the required ordering for the |
766 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
767 | /// `failure` describes the required ordering for the load operation that takes place when |
768 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
769 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
770 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
771 | /// |
772 | /// **Note:** This method is only available on platforms that support atomic |
773 | /// operations on `u8`. |
774 | /// |
775 | /// # Examples |
776 | /// |
777 | /// ``` |
778 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
779 | /// |
780 | /// let some_bool = AtomicBool::new(true); |
781 | /// |
782 | /// assert_eq!(some_bool.compare_exchange(true, |
783 | /// false, |
784 | /// Ordering::Acquire, |
785 | /// Ordering::Relaxed), |
786 | /// Ok(true)); |
787 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
788 | /// |
789 | /// assert_eq!(some_bool.compare_exchange(true, true, |
790 | /// Ordering::SeqCst, |
791 | /// Ordering::Acquire), |
792 | /// Err(false)); |
793 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
794 | /// ``` |
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[doc(alias = "compare_and_swap")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
800 | pub fn compare_exchange( |
801 | &self, |
802 | current: bool, |
803 | new: bool, |
804 | success: Ordering, |
805 | failure: Ordering, |
806 | ) -> Result<bool, bool> { |
807 | if EMULATE_ATOMIC_BOOL { |
808 | // Pick the strongest ordering from success and failure. |
809 | let order = match (success, failure) { |
810 | (SeqCst, _) => SeqCst, |
811 | (_, SeqCst) => SeqCst, |
812 | (AcqRel, _) => AcqRel, |
813 | (_, AcqRel) => { |
814 | panic!("there is no such thing as an acquire-release failure ordering" ) |
815 | } |
816 | (Release, Acquire) => AcqRel, |
817 | (Acquire, _) => Acquire, |
818 | (_, Acquire) => Acquire, |
819 | (Release, Relaxed) => Release, |
(_, Release) => panic!("there is no such thing as a release failure ordering"),
821 | (Relaxed, Relaxed) => Relaxed, |
822 | }; |
823 | let old = if current == new { |
824 | // This is a no-op, but we still need to perform the operation |
825 | // for memory ordering reasons. |
826 | self.fetch_or(false, order) |
827 | } else { |
828 | // This sets the value to the new one and returns the old one. |
829 | self.swap(new, order) |
830 | }; |
831 | if old == current { Ok(old) } else { Err(old) } |
832 | } else { |
833 | // SAFETY: data races are prevented by atomic intrinsics. |
834 | match unsafe { |
835 | atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure) |
836 | } { |
837 | Ok(x) => Ok(x != 0), |
838 | Err(x) => Err(x != 0), |
839 | } |
840 | } |
841 | } |
842 | |
843 | /// Stores a value into the [`bool`] if the current value is the same as the `current` value. |
844 | /// |
845 | /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the |
846 | /// comparison succeeds, which can result in more efficient code on some platforms. The |
847 | /// return value is a result indicating whether the new value was written and containing the |
848 | /// previous value. |
849 | /// |
850 | /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory |
851 | /// ordering of this operation. `success` describes the required ordering for the |
852 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
853 | /// `failure` describes the required ordering for the load operation that takes place when |
854 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
855 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
856 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
857 | /// |
858 | /// **Note:** This method is only available on platforms that support atomic |
859 | /// operations on `u8`. |
860 | /// |
861 | /// # Examples |
862 | /// |
863 | /// ``` |
864 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
865 | /// |
866 | /// let val = AtomicBool::new(false); |
867 | /// |
868 | /// let new = true; |
869 | /// let mut old = val.load(Ordering::Relaxed); |
870 | /// loop { |
871 | /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
872 | /// Ok(_) => break, |
873 | /// Err(x) => old = x, |
874 | /// } |
875 | /// } |
876 | /// ``` |
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[doc(alias = "compare_and_swap")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
882 | pub fn compare_exchange_weak( |
883 | &self, |
884 | current: bool, |
885 | new: bool, |
886 | success: Ordering, |
887 | failure: Ordering, |
888 | ) -> Result<bool, bool> { |
889 | if EMULATE_ATOMIC_BOOL { |
890 | return self.compare_exchange(current, new, success, failure); |
891 | } |
892 | |
893 | // SAFETY: data races are prevented by atomic intrinsics. |
894 | match unsafe { |
895 | atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure) |
896 | } { |
897 | Ok(x) => Ok(x != 0), |
898 | Err(x) => Err(x != 0), |
899 | } |
900 | } |
901 | |
902 | /// Logical "and" with a boolean value. |
903 | /// |
904 | /// Performs a logical "and" operation on the current value and the argument `val`, and sets |
905 | /// the new value to the result. |
906 | /// |
907 | /// Returns the previous value. |
908 | /// |
909 | /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering |
910 | /// of this operation. All ordering modes are possible. Note that using |
911 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
912 | /// using [`Release`] makes the load part [`Relaxed`]. |
913 | /// |
914 | /// **Note:** This method is only available on platforms that support atomic |
915 | /// operations on `u8`. |
916 | /// |
917 | /// # Examples |
918 | /// |
919 | /// ``` |
920 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
921 | /// |
922 | /// let foo = AtomicBool::new(true); |
923 | /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true); |
924 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
925 | /// |
926 | /// let foo = AtomicBool::new(true); |
927 | /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true); |
928 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
929 | /// |
930 | /// let foo = AtomicBool::new(false); |
931 | /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false); |
932 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
933 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
938 | pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { |
939 | // SAFETY: data races are prevented by atomic intrinsics. |
940 | unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } |
941 | } |
942 | |
943 | /// Logical "nand" with a boolean value. |
944 | /// |
945 | /// Performs a logical "nand" operation on the current value and the argument `val`, and sets |
946 | /// the new value to the result. |
947 | /// |
948 | /// Returns the previous value. |
949 | /// |
950 | /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering |
951 | /// of this operation. All ordering modes are possible. Note that using |
952 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
953 | /// using [`Release`] makes the load part [`Relaxed`]. |
954 | /// |
955 | /// **Note:** This method is only available on platforms that support atomic |
956 | /// operations on `u8`. |
957 | /// |
958 | /// # Examples |
959 | /// |
960 | /// ``` |
961 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
962 | /// |
963 | /// let foo = AtomicBool::new(true); |
964 | /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true); |
965 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
966 | /// |
967 | /// let foo = AtomicBool::new(true); |
968 | /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true); |
969 | /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0); |
970 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
971 | /// |
972 | /// let foo = AtomicBool::new(false); |
973 | /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false); |
974 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
975 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
980 | pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { |
981 | // We can't use atomic_nand here because it can result in a bool with |
982 | // an invalid value. This happens because the atomic operation is done |
983 | // with an 8-bit integer internally, which would set the upper 7 bits. |
984 | // So we just use fetch_xor or swap instead. |
985 | if val { |
986 | // !(x & true) == !x |
987 | // We must invert the bool. |
988 | self.fetch_xor(true, order) |
989 | } else { |
990 | // !(x & false) == true |
991 | // We must set the bool to true. |
992 | self.swap(true, order) |
993 | } |
994 | } |
995 | |
996 | /// Logical "or" with a boolean value. |
997 | /// |
998 | /// Performs a logical "or" operation on the current value and the argument `val`, and sets the |
999 | /// new value to the result. |
1000 | /// |
1001 | /// Returns the previous value. |
1002 | /// |
1003 | /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering |
1004 | /// of this operation. All ordering modes are possible. Note that using |
1005 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
1006 | /// using [`Release`] makes the load part [`Relaxed`]. |
1007 | /// |
1008 | /// **Note:** This method is only available on platforms that support atomic |
1009 | /// operations on `u8`. |
1010 | /// |
1011 | /// # Examples |
1012 | /// |
1013 | /// ``` |
1014 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1015 | /// |
1016 | /// let foo = AtomicBool::new(true); |
1017 | /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true); |
1018 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
1019 | /// |
1020 | /// let foo = AtomicBool::new(true); |
1021 | /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true); |
1022 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
1023 | /// |
1024 | /// let foo = AtomicBool::new(false); |
1025 | /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false); |
1026 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
1027 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1032 | pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { |
1033 | // SAFETY: data races are prevented by atomic intrinsics. |
1034 | unsafe { atomic_or(self.v.get(), val as u8, order) != 0 } |
1035 | } |
1036 | |
1037 | /// Logical "xor" with a boolean value. |
1038 | /// |
1039 | /// Performs a logical "xor" operation on the current value and the argument `val`, and sets |
1040 | /// the new value to the result. |
1041 | /// |
1042 | /// Returns the previous value. |
1043 | /// |
1044 | /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering |
1045 | /// of this operation. All ordering modes are possible. Note that using |
1046 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
1047 | /// using [`Release`] makes the load part [`Relaxed`]. |
1048 | /// |
1049 | /// **Note:** This method is only available on platforms that support atomic |
1050 | /// operations on `u8`. |
1051 | /// |
1052 | /// # Examples |
1053 | /// |
1054 | /// ``` |
1055 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1056 | /// |
1057 | /// let foo = AtomicBool::new(true); |
1058 | /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true); |
1059 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
1060 | /// |
1061 | /// let foo = AtomicBool::new(true); |
1062 | /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true); |
1063 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
1064 | /// |
1065 | /// let foo = AtomicBool::new(false); |
1066 | /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false); |
1067 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
1068 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1073 | pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { |
1074 | // SAFETY: data races are prevented by atomic intrinsics. |
1075 | unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 } |
1076 | } |
1077 | |
1078 | /// Logical "not" with a boolean value. |
1079 | /// |
1080 | /// Performs a logical "not" operation on the current value, and sets |
1081 | /// the new value to the result. |
1082 | /// |
1083 | /// Returns the previous value. |
1084 | /// |
1085 | /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering |
1086 | /// of this operation. All ordering modes are possible. Note that using |
1087 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
1088 | /// using [`Release`] makes the load part [`Relaxed`]. |
1089 | /// |
1090 | /// **Note:** This method is only available on platforms that support atomic |
1091 | /// operations on `u8`. |
1092 | /// |
1093 | /// # Examples |
1094 | /// |
1095 | /// ``` |
1096 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1097 | /// |
1098 | /// let foo = AtomicBool::new(true); |
1099 | /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true); |
1100 | /// assert_eq!(foo.load(Ordering::SeqCst), false); |
1101 | /// |
1102 | /// let foo = AtomicBool::new(false); |
1103 | /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false); |
1104 | /// assert_eq!(foo.load(Ordering::SeqCst), true); |
1105 | /// ``` |
#[inline]
#[stable(feature = "atomic_bool_fetch_not", since = "1.81.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1110 | pub fn fetch_not(&self, order: Ordering) -> bool { |
1111 | self.fetch_xor(true, order) |
1112 | } |
1113 | |
1114 | /// Returns a mutable pointer to the underlying [`bool`]. |
1115 | /// |
1116 | /// Doing non-atomic reads and writes on the resulting boolean can be a data race. |
1117 | /// This method is mostly useful for FFI, where the function signature may use |
1118 | /// `*mut bool` instead of `&AtomicBool`. |
1119 | /// |
1120 | /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the |
1121 | /// atomic types work with interior mutability. All modifications of an atomic change the value |
1122 | /// through a shared reference, and can do so safely as long as they use atomic operations. Any |
1123 | /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same |
1124 | /// restriction: operations on it must be atomic. |
1125 | /// |
1126 | /// # Examples |
1127 | /// |
1128 | /// ```ignore (extern-declaration) |
1129 | /// # fn main() { |
1130 | /// use std::sync::atomic::AtomicBool; |
1131 | /// |
1132 | /// extern "C" { |
1133 | /// fn my_atomic_op(arg: *mut bool); |
1134 | /// } |
1135 | /// |
1136 | /// let mut atomic = AtomicBool::new(true); |
1137 | /// unsafe { |
1138 | /// my_atomic_op(atomic.as_ptr()); |
1139 | /// } |
1140 | /// # } |
1141 | /// ``` |
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_never_returns_null_ptr]
1146 | pub const fn as_ptr(&self) -> *mut bool { |
1147 | self.v.get().cast() |
1148 | } |
1149 | |
1150 | /// Fetches the value, and applies a function to it that returns an optional |
1151 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function |
1152 | /// returned `Some(_)`, else `Err(previous_value)`. |
1153 | /// |
1154 | /// Note: This may call the function multiple times if the value has been |
1155 | /// changed from other threads in the meantime, as long as the function |
1156 | /// returns `Some(_)`, but the function will have been applied only once to |
1157 | /// the stored value. |
1158 | /// |
1159 | /// `fetch_update` takes two [`Ordering`] arguments to describe the memory |
1160 | /// ordering of this operation. The first describes the required ordering for |
1161 | /// when the operation finally succeeds while the second describes the |
1162 | /// required ordering for loads. These correspond to the success and failure |
1163 | /// orderings of [`AtomicBool::compare_exchange`] respectively. |
1164 | /// |
1165 | /// Using [`Acquire`] as success ordering makes the store part of this |
1166 | /// operation [`Relaxed`], and using [`Release`] makes the final successful |
1167 | /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], |
1168 | /// [`Acquire`] or [`Relaxed`]. |
1169 | /// |
1170 | /// **Note:** This method is only available on platforms that support atomic |
1171 | /// operations on `u8`. |
1172 | /// |
1173 | /// # Considerations |
1174 | /// |
1175 | /// This method is not magic; it is not provided by the hardware. |
1176 | /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks. |
1177 | /// In particular, this method will not circumvent the [ABA Problem]. |
1178 | /// |
1179 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1180 | /// |
1181 | /// # Examples |
1182 | /// |
1183 | /// ```rust |
1184 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1185 | /// |
1186 | /// let x = AtomicBool::new(false); |
1187 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false)); |
1188 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false)); |
1189 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true)); |
1190 | /// assert_eq!(x.load(Ordering::SeqCst), false); |
1191 | /// ``` |
#[inline]
#[stable(feature = "atomic_fetch_update", since = "1.53.0")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1196 | pub fn fetch_update<F>( |
1197 | &self, |
1198 | set_order: Ordering, |
1199 | fetch_order: Ordering, |
1200 | mut f: F, |
1201 | ) -> Result<bool, bool> |
1202 | where |
1203 | F: FnMut(bool) -> Option<bool>, |
1204 | { |
1205 | let mut prev = self.load(fetch_order); |
1206 | while let Some(next) = f(prev) { |
1207 | match self.compare_exchange_weak(prev, next, set_order, fetch_order) { |
1208 | x @ Ok(_) => return x, |
1209 | Err(next_prev) => prev = next_prev, |
1210 | } |
1211 | } |
1212 | Err(prev) |
1213 | } |
1214 | |
1215 | /// Fetches the value, and applies a function to it that returns an optional |
1216 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function |
1217 | /// returned `Some(_)`, else `Err(previous_value)`. |
1218 | /// |
1219 | /// See also: [`update`](`AtomicBool::update`). |
1220 | /// |
1221 | /// Note: This may call the function multiple times if the value has been |
1222 | /// changed from other threads in the meantime, as long as the function |
1223 | /// returns `Some(_)`, but the function will have been applied only once to |
1224 | /// the stored value. |
1225 | /// |
1226 | /// `try_update` takes two [`Ordering`] arguments to describe the memory |
1227 | /// ordering of this operation. The first describes the required ordering for |
1228 | /// when the operation finally succeeds while the second describes the |
1229 | /// required ordering for loads. These correspond to the success and failure |
1230 | /// orderings of [`AtomicBool::compare_exchange`] respectively. |
1231 | /// |
1232 | /// Using [`Acquire`] as success ordering makes the store part of this |
1233 | /// operation [`Relaxed`], and using [`Release`] makes the final successful |
1234 | /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], |
1235 | /// [`Acquire`] or [`Relaxed`]. |
1236 | /// |
1237 | /// **Note:** This method is only available on platforms that support atomic |
1238 | /// operations on `u8`. |
1239 | /// |
1240 | /// # Considerations |
1241 | /// |
1242 | /// This method is not magic; it is not provided by the hardware. |
1243 | /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks. |
1244 | /// In particular, this method will not circumvent the [ABA Problem]. |
1245 | /// |
1246 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1247 | /// |
1248 | /// # Examples |
1249 | /// |
1250 | /// ```rust |
1251 | /// #![feature(atomic_try_update)] |
1252 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1253 | /// |
1254 | /// let x = AtomicBool::new(false); |
1255 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false)); |
1256 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false)); |
1257 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true)); |
1258 | /// assert_eq!(x.load(Ordering::SeqCst), false); |
1259 | /// ``` |
#[inline]
#[unstable(feature = "atomic_try_update", issue = "135894")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1264 | pub fn try_update( |
1265 | &self, |
1266 | set_order: Ordering, |
1267 | fetch_order: Ordering, |
1268 | f: impl FnMut(bool) -> Option<bool>, |
1269 | ) -> Result<bool, bool> { |
1270 | // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`; |
1271 | // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`. |
1272 | self.fetch_update(set_order, fetch_order, f) |
1273 | } |
1274 | |
/// Fetches the value, and applies a function to it that returns a new value.
1276 | /// The new value is stored and the old value is returned. |
1277 | /// |
1278 | /// See also: [`try_update`](`AtomicBool::try_update`). |
1279 | /// |
1280 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
1281 | /// the meantime, but the function will have been applied only once to the stored value. |
1282 | /// |
1283 | /// `update` takes two [`Ordering`] arguments to describe the memory |
1284 | /// ordering of this operation. The first describes the required ordering for |
1285 | /// when the operation finally succeeds while the second describes the |
1286 | /// required ordering for loads. These correspond to the success and failure |
1287 | /// orderings of [`AtomicBool::compare_exchange`] respectively. |
1288 | /// |
1289 | /// Using [`Acquire`] as success ordering makes the store part |
1290 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load |
1291 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
1292 | /// |
1293 | /// **Note:** This method is only available on platforms that support atomic operations on `u8`. |
1294 | /// |
1295 | /// # Considerations |
1296 | /// |
1297 | /// This method is not magic; it is not provided by the hardware. |
1298 | /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks. |
1299 | /// In particular, this method will not circumvent the [ABA Problem]. |
1300 | /// |
1301 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1302 | /// |
1303 | /// # Examples |
1304 | /// |
1305 | /// ```rust |
1306 | /// #![feature(atomic_try_update)] |
1307 | /// |
1308 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
1309 | /// |
1310 | /// let x = AtomicBool::new(false); |
1311 | /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), false); |
1312 | /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), true); |
1313 | /// assert_eq!(x.load(Ordering::SeqCst), false); |
1314 | /// ``` |
#[inline]
#[unstable(feature = "atomic_try_update", issue = "135894")]
#[cfg(target_has_atomic = "8")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1319 | pub fn update( |
1320 | &self, |
1321 | set_order: Ordering, |
1322 | fetch_order: Ordering, |
1323 | mut f: impl FnMut(bool) -> bool, |
1324 | ) -> bool { |
1325 | let mut prev = self.load(fetch_order); |
1326 | loop { |
1327 | match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) { |
1328 | Ok(x) => break x, |
1329 | Err(next_prev) => prev = next_prev, |
1330 | } |
1331 | } |
1332 | } |
1333 | } |
1334 | |
#[cfg(target_has_atomic_load_store = "ptr")]
1336 | impl<T> AtomicPtr<T> { |
1337 | /// Creates a new `AtomicPtr`. |
1338 | /// |
1339 | /// # Examples |
1340 | /// |
1341 | /// ``` |
1342 | /// use std::sync::atomic::AtomicPtr; |
1343 | /// |
1344 | /// let ptr = &mut 5; |
1345 | /// let atomic_ptr = AtomicPtr::new(ptr); |
1346 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
1350 | pub const fn new(p: *mut T) -> AtomicPtr<T> { |
1351 | AtomicPtr { p: UnsafeCell::new(p) } |
1352 | } |
1353 | |
1354 | /// Creates a new `AtomicPtr` from a pointer. |
1355 | /// |
1356 | /// # Examples |
1357 | /// |
1358 | /// ``` |
1359 | /// use std::sync::atomic::{self, AtomicPtr}; |
1360 | /// |
1361 | /// // Get a pointer to an allocated value |
1362 | /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut())); |
1363 | /// |
1364 | /// assert!(ptr.cast::<AtomicPtr<u8>>().is_aligned()); |
1365 | /// |
1366 | /// { |
1367 | /// // Create an atomic view of the allocated value |
1368 | /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) }; |
1369 | /// |
1370 | /// // Use `atomic` for atomic operations, possibly share it with other threads |
1371 | /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed); |
1372 | /// } |
1373 | /// |
1374 | /// // It's ok to non-atomically access the value behind `ptr`, |
1375 | /// // since the reference to the atomic ended its lifetime in the block above |
1376 | /// assert!(!unsafe { *ptr }.is_null()); |
1377 | /// |
1378 | /// // Deallocate the value |
1379 | /// unsafe { drop(Box::from_raw(ptr)) } |
1380 | /// ``` |
1381 | /// |
1382 | /// # Safety |
1383 | /// |
1384 | /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this |
1385 | /// can be bigger than `align_of::<*mut T>()`). |
1386 | /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. |
1387 | /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not |
1388 | /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, |
1389 | /// without synchronization. |
1390 | /// |
1391 | /// [valid]: crate::ptr#safety |
1392 | /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses |
#[inline]
#[stable(feature = "atomic_from_ptr", since = "1.75.0")]
#[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
1396 | pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> { |
1397 | // SAFETY: guaranteed by the caller |
1398 | unsafe { &*ptr.cast() } |
1399 | } |
1400 | |
1401 | /// Returns a mutable reference to the underlying pointer. |
1402 | /// |
1403 | /// This is safe because the mutable reference guarantees that no other threads are |
1404 | /// concurrently accessing the atomic data. |
1405 | /// |
1406 | /// # Examples |
1407 | /// |
1408 | /// ``` |
1409 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1410 | /// |
1411 | /// let mut data = 10; |
1412 | /// let mut atomic_ptr = AtomicPtr::new(&mut data); |
1413 | /// let mut other_data = 5; |
1414 | /// *atomic_ptr.get_mut() = &mut other_data; |
1415 | /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5); |
1416 | /// ``` |
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
1419 | pub fn get_mut(&mut self) -> &mut *mut T { |
1420 | self.p.get_mut() |
1421 | } |
1422 | |
1423 | /// Gets atomic access to a pointer. |
1424 | /// |
1425 | /// # Examples |
1426 | /// |
1427 | /// ``` |
1428 | /// #![feature(atomic_from_mut)] |
1429 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1430 | /// |
1431 | /// let mut data = 123; |
1432 | /// let mut some_ptr = &mut data as *mut i32; |
1433 | /// let a = AtomicPtr::from_mut(&mut some_ptr); |
1434 | /// let mut other_data = 456; |
1435 | /// a.store(&mut other_data, Ordering::Relaxed); |
1436 | /// assert_eq!(unsafe { *some_ptr }, 456); |
1437 | /// ``` |
#[inline]
#[cfg(target_has_atomic_equal_alignment = "ptr")]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
1441 | pub fn from_mut(v: &mut *mut T) -> &mut Self { |
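// Compile-time assertion that the two alignments are equal: the empty-array
// pattern `[]` only matches an array whose length (the difference computed
// below) is zero, so any mismatch fails to compile.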
1442 | let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()]; |
1443 | // SAFETY: |
1444 | // - the mutable reference guarantees unique ownership. |
1445 | // - the alignment of `*mut T` and `Self` is the same on all platforms |
1446 | // supported by rust, as verified above. |
1447 | unsafe { &mut *(v as *mut *mut T as *mut Self) } |
1448 | } |
1449 | |
1450 | /// Gets non-atomic access to a `&mut [AtomicPtr]` slice. |
1451 | /// |
1452 | /// This is safe because the mutable reference guarantees that no other threads are |
1453 | /// concurrently accessing the atomic data. |
1454 | /// |
1455 | /// # Examples |
1456 | /// |
1457 | /// ``` |
1458 | /// #![feature(atomic_from_mut)] |
1459 | /// use std::ptr::null_mut; |
1460 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1461 | /// |
1462 | /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10]; |
1463 | /// |
1464 | /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs); |
1465 | /// assert_eq!(view, [null_mut::<String>(); 10]); |
1466 | /// view |
1467 | /// .iter_mut() |
1468 | /// .enumerate() |
/// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
1470 | /// |
1471 | /// std::thread::scope(|s| { |
1472 | /// for ptr in &some_ptrs { |
1473 | /// s.spawn(move || { |
1474 | /// let ptr = ptr.load(Ordering::Relaxed); |
1475 | /// assert!(!ptr.is_null()); |
1476 | /// |
1477 | /// let name = unsafe { Box::from_raw(ptr) }; |
/// println!("Hello, {name}!");
1479 | /// }); |
1480 | /// } |
1481 | /// }); |
1482 | /// ``` |
#[inline]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
1485 | pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] { |
1486 | // SAFETY: the mutable reference guarantees unique ownership. |
1487 | unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) } |
1488 | } |
1489 | |
1490 | /// Gets atomic access to a slice of pointers. |
1491 | /// |
1492 | /// # Examples |
1493 | /// |
1494 | /// ``` |
1495 | /// #![feature(atomic_from_mut)] |
1496 | /// use std::ptr::null_mut; |
1497 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1498 | /// |
1499 | /// let mut some_ptrs = [null_mut::<String>(); 10]; |
1500 | /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs); |
1501 | /// std::thread::scope(|s| { |
1502 | /// for i in 0..a.len() { |
1503 | /// s.spawn(move || { |
/// let name = Box::new(format!("thread{i}"));
1505 | /// a[i].store(Box::into_raw(name), Ordering::Relaxed); |
1506 | /// }); |
1507 | /// } |
1508 | /// }); |
1509 | /// for p in some_ptrs { |
1510 | /// assert!(!p.is_null()); |
1511 | /// let name = unsafe { Box::from_raw(p) }; |
/// println!("Hello, {name}!");
1513 | /// } |
1514 | /// ``` |
#[inline]
#[cfg(target_has_atomic_equal_alignment = "ptr")]
#[unstable(feature = "atomic_from_mut", issue = "76314")]
1518 | pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] { |
1519 | // SAFETY: |
1520 | // - the mutable reference guarantees unique ownership. |
// - the alignment of `*mut T` and `Self` is the same on all platforms
//   supported by rust, as guaranteed by the `target_has_atomic_equal_alignment` cfg.
1523 | unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) } |
1524 | } |
1525 | |
1526 | /// Consumes the atomic and returns the contained value. |
1527 | /// |
1528 | /// This is safe because passing `self` by value guarantees that no other threads are |
1529 | /// concurrently accessing the atomic data. |
1530 | /// |
1531 | /// # Examples |
1532 | /// |
1533 | /// ``` |
1534 | /// use std::sync::atomic::AtomicPtr; |
1535 | /// |
1536 | /// let mut data = 5; |
1537 | /// let atomic_ptr = AtomicPtr::new(&mut data); |
1538 | /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5); |
1539 | /// ``` |
#[inline]
#[stable(feature = "atomic_access", since = "1.15.0")]
#[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
1543 | pub const fn into_inner(self) -> *mut T { |
1544 | self.p.into_inner() |
1545 | } |
1546 | |
1547 | /// Loads a value from the pointer. |
1548 | /// |
1549 | /// `load` takes an [`Ordering`] argument which describes the memory ordering |
1550 | /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. |
1551 | /// |
1552 | /// # Panics |
1553 | /// |
1554 | /// Panics if `order` is [`Release`] or [`AcqRel`]. |
1555 | /// |
1556 | /// # Examples |
1557 | /// |
1558 | /// ``` |
1559 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1560 | /// |
1561 | /// let ptr = &mut 5; |
1562 | /// let some_ptr = AtomicPtr::new(ptr); |
1563 | /// |
1564 | /// let value = some_ptr.load(Ordering::Relaxed); |
1565 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1569 | pub fn load(&self, order: Ordering) -> *mut T { |
1570 | // SAFETY: data races are prevented by atomic intrinsics. |
1571 | unsafe { atomic_load(self.p.get(), order) } |
1572 | } |
1573 | |
1574 | /// Stores a value into the pointer. |
1575 | /// |
1576 | /// `store` takes an [`Ordering`] argument which describes the memory ordering |
1577 | /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. |
1578 | /// |
1579 | /// # Panics |
1580 | /// |
1581 | /// Panics if `order` is [`Acquire`] or [`AcqRel`]. |
1582 | /// |
1583 | /// # Examples |
1584 | /// |
1585 | /// ``` |
1586 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1587 | /// |
1588 | /// let ptr = &mut 5; |
1589 | /// let some_ptr = AtomicPtr::new(ptr); |
1590 | /// |
1591 | /// let other_ptr = &mut 10; |
1592 | /// |
1593 | /// some_ptr.store(other_ptr, Ordering::Relaxed); |
1594 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1598 | pub fn store(&self, ptr: *mut T, order: Ordering) { |
1599 | // SAFETY: data races are prevented by atomic intrinsics. |
1600 | unsafe { |
1601 | atomic_store(self.p.get(), ptr, order); |
1602 | } |
1603 | } |
1604 | |
1605 | /// Stores a value into the pointer, returning the previous value. |
1606 | /// |
1607 | /// `swap` takes an [`Ordering`] argument which describes the memory ordering |
1608 | /// of this operation. All ordering modes are possible. Note that using |
1609 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
1610 | /// using [`Release`] makes the load part [`Relaxed`]. |
1611 | /// |
1612 | /// **Note:** This method is only available on platforms that support atomic |
1613 | /// operations on pointers. |
1614 | /// |
1615 | /// # Examples |
1616 | /// |
1617 | /// ``` |
1618 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1619 | /// |
1620 | /// let ptr = &mut 5; |
1621 | /// let some_ptr = AtomicPtr::new(ptr); |
1622 | /// |
1623 | /// let other_ptr = &mut 10; |
1624 | /// |
1625 | /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed); |
1626 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1631 | pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { |
1632 | // SAFETY: data races are prevented by atomic intrinsics. |
1633 | unsafe { atomic_swap(self.p.get(), ptr, order) } |
1634 | } |
1635 | |
1636 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1637 | /// |
1638 | /// The return value is always the previous value. If it is equal to `current`, then the value |
1639 | /// was updated. |
1640 | /// |
1641 | /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory |
1642 | /// ordering of this operation. Notice that even when using [`AcqRel`], the operation |
1643 | /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. |
1644 | /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it |
1645 | /// happens, and using [`Release`] makes the load part [`Relaxed`]. |
1646 | /// |
1647 | /// **Note:** This method is only available on platforms that support atomic |
1648 | /// operations on pointers. |
1649 | /// |
1650 | /// # Migrating to `compare_exchange` and `compare_exchange_weak` |
1651 | /// |
1652 | /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for |
1653 | /// memory orderings: |
1654 | /// |
1655 | /// Original | Success | Failure |
1656 | /// -------- | ------- | ------- |
1657 | /// Relaxed | Relaxed | Relaxed |
1658 | /// Acquire | Acquire | Acquire |
1659 | /// Release | Release | Relaxed |
1660 | /// AcqRel | AcqRel | Acquire |
1661 | /// SeqCst | SeqCst | SeqCst |
1662 | /// |
1663 | /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use |
1664 | /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`, |
1665 | /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err` |
1666 | /// rather than to infer success vs failure based on the value that was read. |
1667 | /// |
1668 | /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead. |
1669 | /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds, |
1670 | /// which allows the compiler to generate better assembly code when the compare and swap |
1671 | /// is used in a loop. |
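///
/// As a sketch only (the names here are placeholders, not part of the API), a
/// call such as `some_ptr.compare_and_swap(current, new, Ordering::AcqRel)`
/// can be migrated using the mapping above:
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let mut value = 17;
/// let some_ptr = AtomicPtr::new(&mut value);
///
/// let current = some_ptr.load(Ordering::Relaxed);
/// let mut other = 42;
/// let new: *mut i32 = &mut other;
///
/// // `AcqRel` maps to success ordering `AcqRel` and failure ordering
/// // `Acquire`; `unwrap_or_else` recovers the old "always return the
/// // previous value" behavior.
/// let old = some_ptr
///     .compare_exchange(current, new, Ordering::AcqRel, Ordering::Acquire)
///     .unwrap_or_else(|x| x);
/// assert_eq!(old, current);
/// ```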
1672 | /// |
1673 | /// # Examples |
1674 | /// |
1675 | /// ``` |
1676 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1677 | /// |
1678 | /// let ptr = &mut 5; |
1679 | /// let some_ptr = AtomicPtr::new(ptr); |
1680 | /// |
1681 | /// let other_ptr = &mut 10; |
1682 | /// |
1683 | /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed); |
1684 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
since = "1.50.0",
note = "Use `compare_exchange` or `compare_exchange_weak` instead"
)]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1693 | pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { |
1694 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
1695 | Ok(x) => x, |
1696 | Err(x) => x, |
1697 | } |
1698 | } |
1699 | |
1700 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1701 | /// |
1702 | /// The return value is a result indicating whether the new value was written and containing |
1703 | /// the previous value. On success this value is guaranteed to be equal to `current`. |
1704 | /// |
1705 | /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory |
1706 | /// ordering of this operation. `success` describes the required ordering for the |
1707 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
1708 | /// `failure` describes the required ordering for the load operation that takes place when |
1709 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
1710 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
1711 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
1712 | /// |
1713 | /// **Note:** This method is only available on platforms that support atomic |
1714 | /// operations on pointers. |
1715 | /// |
1716 | /// # Examples |
1717 | /// |
1718 | /// ``` |
1719 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1720 | /// |
1721 | /// let ptr = &mut 5; |
1722 | /// let some_ptr = AtomicPtr::new(ptr); |
1723 | /// |
1724 | /// let other_ptr = &mut 10; |
1725 | /// |
1726 | /// let value = some_ptr.compare_exchange(ptr, other_ptr, |
1727 | /// Ordering::SeqCst, Ordering::Relaxed); |
1728 | /// ``` |
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1733 | pub fn compare_exchange( |
1734 | &self, |
1735 | current: *mut T, |
1736 | new: *mut T, |
1737 | success: Ordering, |
1738 | failure: Ordering, |
1739 | ) -> Result<*mut T, *mut T> { |
1740 | // SAFETY: data races are prevented by atomic intrinsics. |
1741 | unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) } |
1742 | } |
1743 | |
1744 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1745 | /// |
1746 | /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the |
1747 | /// comparison succeeds, which can result in more efficient code on some platforms. The |
1748 | /// return value is a result indicating whether the new value was written and containing the |
1749 | /// previous value. |
1750 | /// |
1751 | /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory |
1752 | /// ordering of this operation. `success` describes the required ordering for the |
1753 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
1754 | /// `failure` describes the required ordering for the load operation that takes place when |
1755 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
1756 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
1757 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
1758 | /// |
1759 | /// **Note:** This method is only available on platforms that support atomic |
1760 | /// operations on pointers. |
1761 | /// |
1762 | /// # Examples |
1763 | /// |
1764 | /// ``` |
1765 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1766 | /// |
1767 | /// let some_ptr = AtomicPtr::new(&mut 5); |
1768 | /// |
1769 | /// let new = &mut 10; |
1770 | /// let mut old = some_ptr.load(Ordering::Relaxed); |
1771 | /// loop { |
1772 | /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
1773 | /// Ok(_) => break, |
1774 | /// Err(x) => old = x, |
1775 | /// } |
1776 | /// } |
1777 | /// ``` |
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1782 | pub fn compare_exchange_weak( |
1783 | &self, |
1784 | current: *mut T, |
1785 | new: *mut T, |
1786 | success: Ordering, |
1787 | failure: Ordering, |
1788 | ) -> Result<*mut T, *mut T> { |
1789 | // SAFETY: This intrinsic is unsafe because it operates on a raw pointer |
1790 | // but we know for sure that the pointer is valid (we just got it from |
1791 | // an `UnsafeCell` that we have by reference) and the atomic operation |
1792 | // itself allows us to safely mutate the `UnsafeCell` contents. |
1793 | unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) } |
1794 | } |
1795 | |
1796 | /// Fetches the value, and applies a function to it that returns an optional |
1797 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function |
1798 | /// returned `Some(_)`, else `Err(previous_value)`. |
1799 | /// |
1800 | /// Note: This may call the function multiple times if the value has been |
1801 | /// changed from other threads in the meantime, as long as the function |
1802 | /// returns `Some(_)`, but the function will have been applied only once to |
1803 | /// the stored value. |
1804 | /// |
1805 | /// `fetch_update` takes two [`Ordering`] arguments to describe the memory |
1806 | /// ordering of this operation. The first describes the required ordering for |
1807 | /// when the operation finally succeeds while the second describes the |
1808 | /// required ordering for loads. These correspond to the success and failure |
1809 | /// orderings of [`AtomicPtr::compare_exchange`] respectively. |
1810 | /// |
1811 | /// Using [`Acquire`] as success ordering makes the store part of this |
1812 | /// operation [`Relaxed`], and using [`Release`] makes the final successful |
1813 | /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], |
1814 | /// [`Acquire`] or [`Relaxed`]. |
1815 | /// |
1816 | /// **Note:** This method is only available on platforms that support atomic |
1817 | /// operations on pointers. |
1818 | /// |
1819 | /// # Considerations |
1820 | /// |
1821 | /// This method is not magic; it is not provided by the hardware. |
1822 | /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks. |
1823 | /// In particular, this method will not circumvent the [ABA Problem]. |
1824 | /// |
1825 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1826 | /// |
1827 | /// # Examples |
1828 | /// |
1829 | /// ```rust |
1830 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1831 | /// |
1832 | /// let ptr: *mut _ = &mut 5; |
1833 | /// let some_ptr = AtomicPtr::new(ptr); |
1834 | /// |
1835 | /// let new: *mut _ = &mut 10; |
1836 | /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr)); |
1837 | /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| { |
1838 | /// if x == ptr { |
1839 | /// Some(new) |
1840 | /// } else { |
1841 | /// None |
1842 | /// } |
1843 | /// }); |
1844 | /// assert_eq!(result, Ok(ptr)); |
1845 | /// assert_eq!(some_ptr.load(Ordering::SeqCst), new); |
1846 | /// ``` |
#[inline]
#[stable(feature = "atomic_fetch_update", since = "1.53.0")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1851 | pub fn fetch_update<F>( |
1852 | &self, |
1853 | set_order: Ordering, |
1854 | fetch_order: Ordering, |
1855 | mut f: F, |
1856 | ) -> Result<*mut T, *mut T> |
1857 | where |
1858 | F: FnMut(*mut T) -> Option<*mut T>, |
1859 | { |
1860 | let mut prev = self.load(fetch_order); |
1861 | while let Some(next) = f(prev) { |
1862 | match self.compare_exchange_weak(prev, next, set_order, fetch_order) { |
1863 | x @ Ok(_) => return x, |
1864 | Err(next_prev) => prev = next_prev, |
1865 | } |
1866 | } |
1867 | Err(prev) |
1868 | } |

/// Fetches the value, and applies a function to it that returns an optional
1870 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function |
1871 | /// returned `Some(_)`, else `Err(previous_value)`. |
1872 | /// |
1873 | /// See also: [`update`](`AtomicPtr::update`). |
1874 | /// |
1875 | /// Note: This may call the function multiple times if the value has been |
1876 | /// changed from other threads in the meantime, as long as the function |
1877 | /// returns `Some(_)`, but the function will have been applied only once to |
1878 | /// the stored value. |
1879 | /// |
1880 | /// `try_update` takes two [`Ordering`] arguments to describe the memory |
1881 | /// ordering of this operation. The first describes the required ordering for |
1882 | /// when the operation finally succeeds while the second describes the |
1883 | /// required ordering for loads. These correspond to the success and failure |
1884 | /// orderings of [`AtomicPtr::compare_exchange`] respectively. |
1885 | /// |
1886 | /// Using [`Acquire`] as success ordering makes the store part of this |
1887 | /// operation [`Relaxed`], and using [`Release`] makes the final successful |
1888 | /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], |
1889 | /// [`Acquire`] or [`Relaxed`]. |
1890 | /// |
1891 | /// **Note:** This method is only available on platforms that support atomic |
1892 | /// operations on pointers. |
1893 | /// |
1894 | /// # Considerations |
1895 | /// |
1896 | /// This method is not magic; it is not provided by the hardware. |
1897 | /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks. |
1898 | /// In particular, this method will not circumvent the [ABA Problem]. |
1899 | /// |
1900 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1901 | /// |
1902 | /// # Examples |
1903 | /// |
1904 | /// ```rust |
1905 | /// #![feature(atomic_try_update)] |
1906 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1907 | /// |
1908 | /// let ptr: *mut _ = &mut 5; |
1909 | /// let some_ptr = AtomicPtr::new(ptr); |
1910 | /// |
1911 | /// let new: *mut _ = &mut 10; |
1912 | /// assert_eq!(some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr)); |
1913 | /// let result = some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| { |
1914 | /// if x == ptr { |
1915 | /// Some(new) |
1916 | /// } else { |
1917 | /// None |
1918 | /// } |
1919 | /// }); |
1920 | /// assert_eq!(result, Ok(ptr)); |
1921 | /// assert_eq!(some_ptr.load(Ordering::SeqCst), new); |
1922 | /// ``` |
#[inline]
#[unstable(feature = "atomic_try_update", issue = "135894")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1927 | pub fn try_update( |
1928 | &self, |
1929 | set_order: Ordering, |
1930 | fetch_order: Ordering, |
1931 | f: impl FnMut(*mut T) -> Option<*mut T>, |
1932 | ) -> Result<*mut T, *mut T> { |
1933 | // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`; |
1934 | // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`. |
1935 | self.fetch_update(set_order, fetch_order, f) |
1936 | } |
1937 | |
/// Fetches the value, and applies a function to it that returns a new value.
1939 | /// The new value is stored and the old value is returned. |
1940 | /// |
1941 | /// See also: [`try_update`](`AtomicPtr::try_update`). |
1942 | /// |
1943 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
1944 | /// the meantime, but the function will have been applied only once to the stored value. |
1945 | /// |
1946 | /// `update` takes two [`Ordering`] arguments to describe the memory |
1947 | /// ordering of this operation. The first describes the required ordering for |
1948 | /// when the operation finally succeeds while the second describes the |
1949 | /// required ordering for loads. These correspond to the success and failure |
1950 | /// orderings of [`AtomicPtr::compare_exchange`] respectively. |
1951 | /// |
1952 | /// Using [`Acquire`] as success ordering makes the store part |
1953 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load |
1954 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
1955 | /// |
1956 | /// **Note:** This method is only available on platforms that support atomic |
1957 | /// operations on pointers. |
1958 | /// |
1959 | /// # Considerations |
1960 | /// |
1961 | /// This method is not magic; it is not provided by the hardware. |
1962 | /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks. |
1963 | /// In particular, this method will not circumvent the [ABA Problem]. |
1964 | /// |
1965 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
1966 | /// |
1967 | /// # Examples |
1968 | /// |
1969 | /// ```rust |
1970 | /// #![feature(atomic_try_update)] |
1971 | /// |
1972 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
1973 | /// |
1974 | /// let ptr: *mut _ = &mut 5; |
1975 | /// let some_ptr = AtomicPtr::new(ptr); |
1976 | /// |
1977 | /// let new: *mut _ = &mut 10; |
1978 | /// let result = some_ptr.update(Ordering::SeqCst, Ordering::SeqCst, |_| new); |
1979 | /// assert_eq!(result, ptr); |
1980 | /// assert_eq!(some_ptr.load(Ordering::SeqCst), new); |
1981 | /// ``` |
#[inline]
#[unstable(feature = "atomic_try_update", issue = "135894")]
#[cfg(target_has_atomic = "ptr")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1986 | pub fn update( |
1987 | &self, |
1988 | set_order: Ordering, |
1989 | fetch_order: Ordering, |
1990 | mut f: impl FnMut(*mut T) -> *mut T, |
1991 | ) -> *mut T { |
1992 | let mut prev = self.load(fetch_order); |
1993 | loop { |
1994 | match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) { |
1995 | Ok(x) => break x, |
1996 | Err(next_prev) => prev = next_prev, |
1997 | } |
1998 | } |
1999 | } |
2000 | |
2001 | /// Offsets the pointer's address by adding `val` (in units of `T`), |
2002 | /// returning the previous pointer. |
2003 | /// |
2004 | /// This is equivalent to using [`wrapping_add`] to atomically perform the |
2005 | /// equivalent of `ptr = ptr.wrapping_add(val);`. |
2006 | /// |
2007 | /// This method operates in units of `T`, which means that it cannot be used |
2008 | /// to offset the pointer by an amount which is not a multiple of |
2009 | /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to |
2010 | /// work with a deliberately misaligned pointer. In such cases, you may use |
2011 | /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead. |
2012 | /// |
2013 | /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the |
2014 | /// memory ordering of this operation. All ordering modes are possible. Note |
2015 | /// that using [`Acquire`] makes the store part of this operation |
2016 | /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`]. |
2017 | /// |
2018 | /// **Note**: This method is only available on platforms that support atomic |
2019 | /// operations on [`AtomicPtr`]. |
2020 | /// |
2021 | /// [`wrapping_add`]: pointer::wrapping_add |
2022 | /// |
2023 | /// # Examples |
2024 | /// |
2025 | /// ``` |
2026 | /// #![feature(strict_provenance_atomic_ptr)] |
2027 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2028 | /// |
2029 | /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut()); |
2030 | /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0); |
2031 | /// // Note: units of `size_of::<i64>()`. |
2032 | /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8); |
2033 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2038 | pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T { |
2039 | self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order) |
2040 | } |
2041 | |
2042 | /// Offsets the pointer's address by subtracting `val` (in units of `T`), |
2043 | /// returning the previous pointer. |
2044 | /// |
2045 | /// This is equivalent to using [`wrapping_sub`] to atomically perform the |
2046 | /// equivalent of `ptr = ptr.wrapping_sub(val);`. |
2047 | /// |
2048 | /// This method operates in units of `T`, which means that it cannot be used |
2049 | /// to offset the pointer by an amount which is not a multiple of |
2050 | /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to |
2051 | /// work with a deliberately misaligned pointer. In such cases, you may use |
2052 | /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead. |
2053 | /// |
2054 | /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory |
2055 | /// ordering of this operation. All ordering modes are possible. Note that |
2056 | /// using [`Acquire`] makes the store part of this operation [`Relaxed`], |
2057 | /// and using [`Release`] makes the load part [`Relaxed`]. |
2058 | /// |
2059 | /// **Note**: This method is only available on platforms that support atomic |
2060 | /// operations on [`AtomicPtr`]. |
2061 | /// |
2062 | /// [`wrapping_sub`]: pointer::wrapping_sub |
2063 | /// |
2064 | /// # Examples |
2065 | /// |
2066 | /// ``` |
2067 | /// #![feature(strict_provenance_atomic_ptr)] |
2068 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2069 | /// |
2070 | /// let array = [1i32, 2i32]; |
2071 | /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _); |
2072 | /// |
2073 | /// assert!(core::ptr::eq( |
2074 | /// atom.fetch_ptr_sub(1, Ordering::Relaxed), |
2075 | /// &array[1], |
2076 | /// )); |
2077 | /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0])); |
2078 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2083 | pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T { |
2084 | self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order) |
2085 | } |
2086 | |
2087 | /// Offsets the pointer's address by adding `val` *bytes*, returning the |
2088 | /// previous pointer. |
2089 | /// |
2090 | /// This is equivalent to using [`wrapping_byte_add`] to atomically |
2091 | /// perform `ptr = ptr.wrapping_byte_add(val)`. |
2092 | /// |
2093 | /// `fetch_byte_add` takes an [`Ordering`] argument which describes the |
2094 | /// memory ordering of this operation. All ordering modes are possible. Note |
2095 | /// that using [`Acquire`] makes the store part of this operation |
2096 | /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`]. |
2097 | /// |
2098 | /// **Note**: This method is only available on platforms that support atomic |
2099 | /// operations on [`AtomicPtr`]. |
2100 | /// |
2101 | /// [`wrapping_byte_add`]: pointer::wrapping_byte_add |
2102 | /// |
2103 | /// # Examples |
2104 | /// |
2105 | /// ``` |
2106 | /// #![feature(strict_provenance_atomic_ptr)] |
2107 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2108 | /// |
2109 | /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut()); |
2110 | /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0); |
2111 | /// // Note: in units of bytes, not `size_of::<i64>()`. |
2112 | /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1); |
2113 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2118 | pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T { |
2119 | // SAFETY: data races are prevented by atomic intrinsics. |
2120 | unsafe { atomic_add(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() } |
2121 | } |
2122 | |
2123 | /// Offsets the pointer's address by subtracting `val` *bytes*, returning the |
2124 | /// previous pointer. |
2125 | /// |
2126 | /// This is equivalent to using [`wrapping_byte_sub`] to atomically |
2127 | /// perform `ptr = ptr.wrapping_byte_sub(val)`. |
2128 | /// |
2129 | /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the |
2130 | /// memory ordering of this operation. All ordering modes are possible. Note |
2131 | /// that using [`Acquire`] makes the store part of this operation |
2132 | /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`]. |
2133 | /// |
2134 | /// **Note**: This method is only available on platforms that support atomic |
2135 | /// operations on [`AtomicPtr`]. |
2136 | /// |
2137 | /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub |
2138 | /// |
2139 | /// # Examples |
2140 | /// |
2141 | /// ``` |
2142 | /// #![feature(strict_provenance_atomic_ptr)] |
2143 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2144 | /// |
2145 | /// let atom = AtomicPtr::<i64>::new(core::ptr::without_provenance_mut(1)); |
2146 | /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1); |
2147 | /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0); |
2148 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2153 | pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T { |
2154 | // SAFETY: data races are prevented by atomic intrinsics. |
2155 | unsafe { atomic_sub(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() } |
2156 | } |
2157 | |
2158 | /// Performs a bitwise "or" operation on the address of the current pointer, |
2159 | /// and the argument `val`, and stores a pointer with provenance of the |
2160 | /// current pointer and the resulting address. |
2161 | /// |
2162 | /// This is equivalent to using [`map_addr`] to atomically perform |
2163 | /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged |
2164 | /// pointer schemes to atomically set tag bits. |
2165 | /// |
2166 | /// **Caveat**: This operation returns the previous value. To compute the |
2167 | /// stored value without losing provenance, you may use [`map_addr`]. For |
/// example: `a.fetch_or(val, order).map_addr(|a| a | val)`.
2169 | /// |
2170 | /// `fetch_or` takes an [`Ordering`] argument which describes the memory |
2171 | /// ordering of this operation. All ordering modes are possible. Note that |
2172 | /// using [`Acquire`] makes the store part of this operation [`Relaxed`], |
2173 | /// and using [`Release`] makes the load part [`Relaxed`]. |
2174 | /// |
2175 | /// **Note**: This method is only available on platforms that support atomic |
2176 | /// operations on [`AtomicPtr`]. |
2177 | /// |
2178 | /// This API and its claimed semantics are part of the Strict Provenance |
2179 | /// experiment, see the [module documentation for `ptr`][crate::ptr] for |
2180 | /// details. |
2181 | /// |
2182 | /// [`map_addr`]: pointer::map_addr |
2183 | /// |
2184 | /// # Examples |
2185 | /// |
2186 | /// ``` |
2187 | /// #![feature(strict_provenance_atomic_ptr)] |
2188 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2189 | /// |
2190 | /// let pointer = &mut 3i64 as *mut i64; |
2191 | /// |
2192 | /// let atom = AtomicPtr::<i64>::new(pointer); |
2193 | /// // Tag the bottom bit of the pointer. |
2194 | /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0); |
2195 | /// // Extract and untag. |
2196 | /// let tagged = atom.load(Ordering::Relaxed); |
2197 | /// assert_eq!(tagged.addr() & 1, 1); |
2198 | /// assert_eq!(tagged.map_addr(|p| p & !1), pointer); |
2199 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2204 | pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T { |
2205 | // SAFETY: data races are prevented by atomic intrinsics. |
2206 | unsafe { atomic_or(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() } |
2207 | } |
2208 | |
2209 | /// Performs a bitwise "and" operation on the address of the current |
2210 | /// pointer, and the argument `val`, and stores a pointer with provenance of |
2211 | /// the current pointer and the resulting address. |
2212 | /// |
2213 | /// This is equivalent to using [`map_addr`] to atomically perform |
2214 | /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged |
2215 | /// pointer schemes to atomically unset tag bits. |
2216 | /// |
2217 | /// **Caveat**: This operation returns the previous value. To compute the |
2218 | /// stored value without losing provenance, you may use [`map_addr`]. For |
/// example: `a.fetch_and(val, order).map_addr(|a| a & val)`.
2220 | /// |
2221 | /// `fetch_and` takes an [`Ordering`] argument which describes the memory |
2222 | /// ordering of this operation. All ordering modes are possible. Note that |
2223 | /// using [`Acquire`] makes the store part of this operation [`Relaxed`], |
2224 | /// and using [`Release`] makes the load part [`Relaxed`]. |
2225 | /// |
2226 | /// **Note**: This method is only available on platforms that support atomic |
2227 | /// operations on [`AtomicPtr`]. |
2228 | /// |
2229 | /// This API and its claimed semantics are part of the Strict Provenance |
2230 | /// experiment, see the [module documentation for `ptr`][crate::ptr] for |
2231 | /// details. |
2232 | /// |
2233 | /// [`map_addr`]: pointer::map_addr |
2234 | /// |
2235 | /// # Examples |
2236 | /// |
2237 | /// ``` |
2238 | /// #![feature(strict_provenance_atomic_ptr)] |
2239 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2240 | /// |
2241 | /// let pointer = &mut 3i64 as *mut i64; |
2242 | /// // A tagged pointer |
2243 | /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1)); |
2244 | /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1); |
2245 | /// // Untag, and extract the previously tagged pointer. |
2246 | /// let untagged = atom.fetch_and(!1, Ordering::Relaxed) |
2247 | /// .map_addr(|a| a & !1); |
2248 | /// assert_eq!(untagged, pointer); |
2249 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2254 | pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T { |
2255 | // SAFETY: data races are prevented by atomic intrinsics. |
2256 | unsafe { atomic_and(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() } |
2257 | } |
2258 | |
2259 | /// Performs a bitwise "xor" operation on the address of the current |
2260 | /// pointer, and the argument `val`, and stores a pointer with provenance of |
2261 | /// the current pointer and the resulting address. |
2262 | /// |
2263 | /// This is equivalent to using [`map_addr`] to atomically perform |
2264 | /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged |
2265 | /// pointer schemes to atomically toggle tag bits. |
2266 | /// |
2267 | /// **Caveat**: This operation returns the previous value. To compute the |
2268 | /// stored value without losing provenance, you may use [`map_addr`]. For |
/// example: `a.fetch_xor(val, order).map_addr(|a| a ^ val)`.
2270 | /// |
2271 | /// `fetch_xor` takes an [`Ordering`] argument which describes the memory |
2272 | /// ordering of this operation. All ordering modes are possible. Note that |
2273 | /// using [`Acquire`] makes the store part of this operation [`Relaxed`], |
2274 | /// and using [`Release`] makes the load part [`Relaxed`]. |
2275 | /// |
2276 | /// **Note**: This method is only available on platforms that support atomic |
2277 | /// operations on [`AtomicPtr`]. |
2278 | /// |
2279 | /// This API and its claimed semantics are part of the Strict Provenance |
2280 | /// experiment, see the [module documentation for `ptr`][crate::ptr] for |
2281 | /// details. |
2282 | /// |
2283 | /// [`map_addr`]: pointer::map_addr |
2284 | /// |
2285 | /// # Examples |
2286 | /// |
2287 | /// ``` |
2288 | /// #![feature(strict_provenance_atomic_ptr)] |
2289 | /// use core::sync::atomic::{AtomicPtr, Ordering}; |
2290 | /// |
2291 | /// let pointer = &mut 3i64 as *mut i64; |
2292 | /// let atom = AtomicPtr::<i64>::new(pointer); |
2293 | /// |
2294 | /// // Toggle a tag bit on the pointer. |
2295 | /// atom.fetch_xor(1, Ordering::Relaxed); |
2296 | /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1); |
2297 | /// ``` |
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2302 | pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T { |
2303 | // SAFETY: data races are prevented by atomic intrinsics. |
2304 | unsafe { atomic_xor(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() } |
2305 | } |
2306 | |
2307 | /// Returns a mutable pointer to the underlying pointer. |
2308 | /// |
2309 | /// Doing non-atomic reads and writes on the resulting pointer can be a data race. |
2310 | /// This method is mostly useful for FFI, where the function signature may use |
2311 | /// `*mut *mut T` instead of `&AtomicPtr<T>`. |
2312 | /// |
2313 | /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the |
2314 | /// atomic types work with interior mutability. All modifications of an atomic change the value |
2315 | /// through a shared reference, and can do so safely as long as they use atomic operations. Any |
2316 | /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same |
2317 | /// restriction: operations on it must be atomic. |
2318 | /// |
2319 | /// # Examples |
2320 | /// |
2321 | /// ```ignore (extern-declaration) |
2322 | /// use std::sync::atomic::AtomicPtr; |
2323 | /// |
2324 | /// extern "C" { |
2325 | /// fn my_atomic_op(arg: *mut *mut u32); |
2326 | /// } |
2327 | /// |
2328 | /// let mut value = 17; |
2329 | /// let atomic = AtomicPtr::new(&mut value); |
2330 | /// |
2331 | /// // SAFETY: Safe as long as `my_atomic_op` is atomic. |
2332 | /// unsafe { |
2333 | /// my_atomic_op(atomic.as_ptr()); |
2334 | /// } |
2335 | /// ``` |
#[inline]
#[stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
#[rustc_never_returns_null_ptr]
2340 | pub const fn as_ptr(&self) -> *mut *mut T { |
2341 | self.p.get() |
2342 | } |
2343 | } |
2344 | |
2345 | #[cfg (target_has_atomic_load_store = "8" )] |
2346 | #[stable (feature = "atomic_bool_from" , since = "1.24.0" )] |
2347 | impl From<bool> for AtomicBool { |
2348 | /// Converts a `bool` into an `AtomicBool`. |
2349 | /// |
2350 | /// # Examples |
2351 | /// |
2352 | /// ``` |
2353 | /// use std::sync::atomic::AtomicBool; |
2354 | /// let atomic_bool = AtomicBool::from(true); |
/// assert_eq!(format!("{atomic_bool:?}"), "true")
2356 | /// ``` |
#[inline]
2358 | fn from(b: bool) -> Self { |
2359 | Self::new(b) |
2360 | } |
2361 | } |
2362 | |
2363 | #[cfg (target_has_atomic_load_store = "ptr" )] |
2364 | #[stable (feature = "atomic_from" , since = "1.23.0" )] |
2365 | impl<T> From<*mut T> for AtomicPtr<T> { |
2366 | /// Converts a `*mut T` into an `AtomicPtr<T>`. |
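///
/// # Examples
///
/// A round-trip through the conversion:
///
/// ```
/// use std::sync::atomic::AtomicPtr;
///
/// let mut value = 5;
/// let ptr: *mut i32 = &mut value;
/// let atomic_ptr = AtomicPtr::from(ptr);
/// assert_eq!(atomic_ptr.into_inner(), ptr);
/// ```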
#[inline]
2368 | fn from(p: *mut T) -> Self { |
2369 | Self::new(p) |
2370 | } |
2371 | } |
2372 | |
#[allow(unused_macros)] // This macro ends up being unused on some architectures.
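// Token-level helper used by `atomic_int!` below: expands to the `yes = [...]`
// tokens when the given type is `u8` or `i8`, and to the `no = [...]` tokens
// for every other identifier. Both arms are optional and default to nothing.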
2374 | macro_rules! if_8_bit { |
(u8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
(i8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
($_:ident, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($no)*)?) };
2378 | } |
2379 | |
#[cfg(target_has_atomic_load_store)]
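// Generates one atomic integer type (such as `AtomicUsize`) along with its
// `Default`, `From`, `Debug`, and `Sync` impls and its inherent methods,
// parameterized over the stability, alignment, and naming metadata passed
// as arguments.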
2381 | macro_rules! atomic_int { |
2382 | ($cfg_cas:meta, |
2383 | $cfg_align:meta, |
2384 | $stable:meta, |
2385 | $stable_cxchg:meta, |
2386 | $stable_debug:meta, |
2387 | $stable_access:meta, |
2388 | $stable_from:meta, |
2389 | $stable_nand:meta, |
2390 | $const_stable_new:meta, |
2391 | $const_stable_into_inner:meta, |
2392 | $diagnostic_item:meta, |
2393 | $s_int_type:literal, |
2394 | $extra_feature:expr, |
2395 | $min_fn:ident, $max_fn:ident, |
2396 | $align:expr, |
2397 | $int_type:ident $atomic_type:ident) => { |
2398 | /// An integer type which can be safely shared between threads. |
2399 | /// |
2400 | /// This type has the same |
2401 | #[doc = if_8_bit!( |
2402 | $int_type, |
2403 | yes = ["size, alignment, and bit validity" ], |
2404 | no = ["size and bit validity" ], |
2405 | )] |
2406 | /// as the underlying integer type, [` |
2407 | #[doc = $s_int_type] |
2408 | /// `]. |
2409 | #[doc = if_8_bit! { |
2410 | $int_type, |
2411 | no = [ |
2412 | "However, the alignment of this type is always equal to its " , |
2413 | "size, even on targets where [`" , $s_int_type, "`] has a " , |
2414 | "lesser alignment." |
2415 | ], |
2416 | }] |
2417 | /// |
2418 | /// For more about the differences between atomic types and |
2419 | /// non-atomic types as well as information about the portability of |
2420 | /// this type, please see the [module-level documentation]. |
2421 | /// |
2422 | /// **Note:** This type is only available on platforms that support |
2423 | /// atomic loads and stores of [` |
2424 | #[doc = $s_int_type] |
2425 | /// `]. |
2426 | /// |
2427 | /// [module-level documentation]: crate::sync::atomic |
2428 | #[$stable] |
2429 | #[$diagnostic_item] |
2430 | #[repr(C, align($align))] |
2431 | pub struct $atomic_type { |
2432 | v: UnsafeCell<$int_type>, |
2433 | } |
2434 | |
2435 | #[$stable] |
2436 | impl Default for $atomic_type { |
2437 | #[inline] |
2438 | fn default() -> Self { |
2439 | Self::new(Default::default()) |
2440 | } |
2441 | } |
2442 | |
2443 | #[$stable_from] |
2444 | impl From<$int_type> for $atomic_type { |
2445 | #[doc = concat!("Converts an `" , stringify!($int_type), "` into an `" , stringify!($atomic_type), "`." )] |
2446 | #[inline] |
2447 | fn from(v: $int_type) -> Self { Self::new(v) } |
2448 | } |
2449 | |
2450 | #[$stable_debug] |
2451 | impl fmt::Debug for $atomic_type { |
2452 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
2453 | fmt::Debug::fmt(&self.load(Ordering::Relaxed), f) |
2454 | } |
2455 | } |
2456 | |
2457 | // Send is implicitly implemented. |
2458 | #[$stable] |
2459 | unsafe impl Sync for $atomic_type {} |
2460 | |
2461 | impl $atomic_type { |
2462 | /// Creates a new atomic integer. |
2463 | /// |
2464 | /// # Examples |
2465 | /// |
2466 | /// ``` |
2467 | #[doc = concat!($extra_feature, "use std::sync::atomic::" , stringify!($atomic_type), ";" )] |
2468 | /// |
2469 | #[doc = concat!("let atomic_forty_two = " , stringify!($atomic_type), "::new(42);" )] |
2470 | /// ``` |
2471 | #[inline] |
2472 | #[$stable] |
2473 | #[$const_stable_new] |
2474 | #[must_use] |
2475 | pub const fn new(v: $int_type) -> Self { |
2476 | Self {v: UnsafeCell::new(v)} |
2477 | } |
2478 | |
2479 | /// Creates a new reference to an atomic integer from a pointer. |
2480 | /// |
2481 | /// # Examples |
2482 | /// |
2483 | /// ``` |
2484 | #[doc = concat!($extra_feature, "use std::sync::atomic::{self, " , stringify!($atomic_type), "};" )] |
2485 | /// |
2486 | /// // Get a pointer to an allocated value |
2487 | #[doc = concat!("let ptr: *mut " , stringify!($int_type), " = Box::into_raw(Box::new(0));" )] |
2488 | /// |
2489 | #[doc = concat!("assert!(ptr.cast::<" , stringify!($atomic_type), ">().is_aligned());" )] |
2490 | /// |
2491 | /// { |
2492 | /// // Create an atomic view of the allocated value |
2493 | // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above) |
2494 | #[doc = concat!(" let atomic = unsafe {" , stringify!($atomic_type), "::from_ptr(ptr) };" )] |
2495 | /// |
2496 | /// // Use `atomic` for atomic operations, possibly share it with other threads |
2497 | /// atomic.store(1, atomic::Ordering::Relaxed); |
2498 | /// } |
2499 | /// |
2500 | /// // It's ok to non-atomically access the value behind `ptr`, |
2501 | /// // since the reference to the atomic ended its lifetime in the block above |
2502 | /// assert_eq!(unsafe { *ptr }, 1); |
2503 | /// |
2504 | /// // Deallocate the value |
2505 | /// unsafe { drop(Box::from_raw(ptr)) } |
2506 | /// ``` |
2507 | /// |
2508 | /// # Safety |
2509 | /// |
2510 | /// * `ptr` must be aligned to |
2511 | #[doc = concat!(" `align_of::<" , stringify!($atomic_type), ">()`" )] |
2512 | #[doc = if_8_bit!{ |
2513 | $int_type, |
2514 | yes = [ |
2515 | " (note that this is always true, since `align_of::<" , |
2516 | stringify!($atomic_type), ">() == 1`)." |
2517 | ], |
2518 | no = [ |
2519 | " (note that on some platforms this can be bigger than `align_of::<" , |
2520 | stringify!($int_type), ">()`)." |
2521 | ], |
2522 | }] |
2523 | /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. |
2524 | /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not |
2525 | /// allowed to mix atomic and non-atomic accesses, or atomic accesses of different sizes, |
2526 | /// without synchronization. |
2527 | /// |
2528 | /// [valid]: crate::ptr#safety |
2529 | /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses |
2530 | #[inline] |
2531 | #[stable(feature = "atomic_from_ptr" , since = "1.75.0" )] |
2532 | #[rustc_const_stable(feature = "const_atomic_from_ptr" , since = "1.84.0" )] |
2533 | pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type { |
2534 | // SAFETY: guaranteed by the caller |
2535 | unsafe { &*ptr.cast() } |
2536 | } |
2537 | |
2538 | |
2539 | /// Returns a mutable reference to the underlying integer. |
2540 | /// |
2541 | /// This is safe because the mutable reference guarantees that no other threads are |
2542 | /// concurrently accessing the atomic data. |
2543 | /// |
2544 | /// # Examples |
2545 | /// |
2546 | /// ``` |
2547 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2548 | /// |
2549 | #[doc = concat!("let mut some_var = " , stringify!($atomic_type), "::new(10);" )] |
2550 | /// assert_eq!(*some_var.get_mut(), 10); |
2551 | /// *some_var.get_mut() = 5; |
2552 | /// assert_eq!(some_var.load(Ordering::SeqCst), 5); |
2553 | /// ``` |
2554 | #[inline] |
2555 | #[$stable_access] |
2556 | pub fn get_mut(&mut self) -> &mut $int_type { |
2557 | self.v.get_mut() |
2558 | } |
2559 | |
2560 | #[doc = concat!("Get atomic access to a `&mut " , stringify!($int_type), "`." )] |
2561 | /// |
2562 | #[doc = if_8_bit! { |
2563 | $int_type, |
2564 | no = [ |
2565 | "**Note:** This function is only available on targets where `" , |
2566 | stringify!($atomic_type), "` has the same alignment as `" , stringify!($int_type), "`." |
2567 | ], |
2568 | }] |
2569 | /// |
2570 | /// # Examples |
2571 | /// |
2572 | /// ``` |
2573 | /// #![feature(atomic_from_mut)] |
2574 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2575 | /// |
2576 | /// let mut some_int = 123; |
2577 | #[doc = concat!("let a = " , stringify!($atomic_type), "::from_mut(&mut some_int);" )] |
2578 | /// a.store(100, Ordering::Relaxed); |
2579 | /// assert_eq!(some_int, 100); |
2580 | /// ``` |
2581 | /// |
2582 | #[inline] |
2583 | #[$cfg_align] |
2584 | #[unstable(feature = "atomic_from_mut" , issue = "76314" )] |
2585 | pub fn from_mut(v: &mut $int_type) -> &mut Self { |
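// Compile-time alignment check: the array length is
// `align_of::<Self>() - align_of::<$int_type>()`, and the empty pattern `[]`
// only matches an array of length zero, so this line fails to compile unless
// the two alignments are equal.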
2586 | let [] = [(); align_of::<Self>() - align_of::<$int_type>()]; |
2587 | // SAFETY: |
2588 | // - the mutable reference guarantees unique ownership. |
2589 | // - the alignment of `$int_type` and `Self` is the |
2590 | // same, as promised by $cfg_align and verified above. |
2591 | unsafe { &mut *(v as *mut $int_type as *mut Self) } |
2592 | } |
2593 | |
2594 | #[doc = concat!("Get non-atomic access to a `&mut [" , stringify!($atomic_type), "]` slice" )] |
2595 | /// |
2596 | /// This is safe because the mutable reference guarantees that no other threads are |
2597 | /// concurrently accessing the atomic data. |
2598 | /// |
2599 | /// # Examples |
2600 | /// |
2601 | /// ``` |
2602 | /// #![feature(atomic_from_mut)] |
2603 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2604 | /// |
2605 | #[doc = concat!("let mut some_ints = [const { " , stringify!($atomic_type), "::new(0) }; 10];" )] |
2606 | /// |
2607 | #[doc = concat!("let view: &mut [" , stringify!($int_type), "] = " , stringify!($atomic_type), "::get_mut_slice(&mut some_ints);" )] |
2608 | /// assert_eq!(view, [0; 10]); |
2609 | /// view |
2610 | /// .iter_mut() |
2611 | /// .enumerate() |
2612 | /// .for_each(|(idx, int)| *int = idx as _); |
2613 | /// |
2614 | /// std::thread::scope(|s| { |
2615 | /// some_ints |
2616 | /// .iter() |
2617 | /// .enumerate() |
2618 | /// .for_each(|(idx, int)| { |
2619 | /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _)); |
2620 | /// }) |
2621 | /// }); |
2622 | /// ``` |
2623 | #[inline] |
2624 | #[unstable(feature = "atomic_from_mut" , issue = "76314" )] |
2625 | pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] { |
2626 | // SAFETY: the mutable reference guarantees unique ownership. |
2627 | unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) } |
2628 | } |
2629 | |
2630 | #[doc = concat!("Get atomic access to a `&mut [" , stringify!($int_type), "]` slice." )] |
2631 | /// |
2632 | /// # Examples |
2633 | /// |
2634 | /// ``` |
2635 | /// #![feature(atomic_from_mut)] |
2636 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2637 | /// |
2638 | /// let mut some_ints = [0; 10]; |
2639 | #[doc = concat!("let a = &*" , stringify!($atomic_type), "::from_mut_slice(&mut some_ints);" )] |
2640 | /// std::thread::scope(|s| { |
2641 | /// for i in 0..a.len() { |
2642 | /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed)); |
2643 | /// } |
2644 | /// }); |
2645 | /// for (i, n) in some_ints.into_iter().enumerate() { |
2646 | /// assert_eq!(i, n as usize); |
2647 | /// } |
2648 | /// ``` |
2649 | #[inline] |
2650 | #[$cfg_align] |
2651 | #[unstable(feature = "atomic_from_mut" , issue = "76314" )] |
2652 | pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] { |
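// Same compile-time alignment check as in `from_mut`: this line only
// compiles when `Self` and `$int_type` have equal alignment.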
2653 | let [] = [(); align_of::<Self>() - align_of::<$int_type>()]; |
2654 | // SAFETY: |
2655 | // - the mutable reference guarantees unique ownership. |
2656 | // - the alignment of `$int_type` and `Self` is the |
2657 | // same, as promised by $cfg_align and verified above. |
2658 | unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) } |
2659 | } |
2660 | |
2661 | /// Consumes the atomic and returns the contained value. |
2662 | /// |
2663 | /// This is safe because passing `self` by value guarantees that no other threads are |
2664 | /// concurrently accessing the atomic data. |
2665 | /// |
2666 | /// # Examples |
2667 | /// |
2668 | /// ``` |
2669 | #[doc = concat!($extra_feature, "use std::sync::atomic::" , stringify!($atomic_type), ";" )] |
2670 | /// |
2671 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2672 | /// assert_eq!(some_var.into_inner(), 5); |
2673 | /// ``` |
2674 | #[inline] |
2675 | #[$stable_access] |
2676 | #[$const_stable_into_inner] |
2677 | pub const fn into_inner(self) -> $int_type { |
2678 | self.v.into_inner() |
2679 | } |
2680 | |
2681 | /// Loads a value from the atomic integer. |
2682 | /// |
2683 | /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation. |
2684 | /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. |
2685 | /// |
2686 | /// # Panics |
2687 | /// |
2688 | /// Panics if `order` is [`Release`] or [`AcqRel`]. |
2689 | /// |
2690 | /// # Examples |
2691 | /// |
2692 | /// ``` |
2693 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2694 | /// |
2695 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2696 | /// |
2697 | /// assert_eq!(some_var.load(Ordering::Relaxed), 5); |
2698 | /// ``` |
2699 | #[inline] |
2700 | #[$stable] |
2701 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2702 | pub fn load(&self, order: Ordering) -> $int_type { |
2703 | // SAFETY: data races are prevented by atomic intrinsics. |
2704 | unsafe { atomic_load(self.v.get(), order) } |
2705 | } |
2706 | |
2707 | /// Stores a value into the atomic integer. |
2708 | /// |
2709 | /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation. |
2710 | /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. |
2711 | /// |
2712 | /// # Panics |
2713 | /// |
2714 | /// Panics if `order` is [`Acquire`] or [`AcqRel`]. |
2715 | /// |
2716 | /// # Examples |
2717 | /// |
2718 | /// ``` |
2719 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2720 | /// |
2721 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2722 | /// |
2723 | /// some_var.store(10, Ordering::Relaxed); |
2724 | /// assert_eq!(some_var.load(Ordering::Relaxed), 10); |
2725 | /// ``` |
2726 | #[inline] |
2727 | #[$stable] |
2728 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2729 | pub fn store(&self, val: $int_type, order: Ordering) { |
2730 | // SAFETY: data races are prevented by atomic intrinsics. |
2731 | unsafe { atomic_store(self.v.get(), val, order); } |
2732 | } |
2733 | |
2734 | /// Stores a value into the atomic integer, returning the previous value. |
2735 | /// |
2736 | /// `swap` takes an [`Ordering`] argument which describes the memory ordering |
2737 | /// of this operation. All ordering modes are possible. Note that using |
2738 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
2739 | /// using [`Release`] makes the load part [`Relaxed`]. |
2740 | /// |
2741 | /// **Note**: This method is only available on platforms that support atomic operations on |
2742 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2743 | /// |
2744 | /// # Examples |
2745 | /// |
2746 | /// ``` |
2747 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2748 | /// |
2749 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2750 | /// |
2751 | /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); |
2752 | /// ``` |
2753 | #[inline] |
2754 | #[$stable] |
2755 | #[$cfg_cas] |
2756 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2757 | pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { |
2758 | // SAFETY: data races are prevented by atomic intrinsics. |
2759 | unsafe { atomic_swap(self.v.get(), val, order) } |
2760 | } |
2761 | |
2762 | /// Stores a value into the atomic integer if the current value is the same as |
2763 | /// the `current` value. |
2764 | /// |
2765 | /// The return value is always the previous value. If it is equal to `current`, then the |
2766 | /// value was updated. |
2767 | /// |
2768 | /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory |
2769 | /// ordering of this operation. Notice that even when using [`AcqRel`], the operation |
2770 | /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. |
2771 | /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it |
2772 | /// happens, and using [`Release`] makes the load part [`Relaxed`]. |
2773 | /// |
2774 | /// **Note**: This method is only available on platforms that support atomic operations on |
2775 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2776 | /// |
2777 | /// # Migrating to `compare_exchange` and `compare_exchange_weak` |
2778 | /// |
2779 | /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for |
2780 | /// memory orderings: |
2781 | /// |
2782 | /// Original | Success | Failure |
2783 | /// -------- | ------- | ------- |
2784 | /// Relaxed | Relaxed | Relaxed |
2785 | /// Acquire | Acquire | Acquire |
2786 | /// Release | Release | Relaxed |
2787 | /// AcqRel | AcqRel | Acquire |
2788 | /// SeqCst | SeqCst | SeqCst |
2789 | /// |
2790 | /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use |
2791 | /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`, |
2792 | /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err` |
2793 | /// rather than to infer success vs failure based on the value that was read. |
2794 | /// |
2795 | /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead. |
2796 | /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds, |
2797 | /// which allows the compiler to generate better assembly code when the compare and swap |
2798 | /// is used in a loop. |
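///
/// For example, a single `compare_and_swap` call can be migrated as in the
/// following sketch (the orderings chosen here are illustrative, following the
/// table above):
///
/// ```
#[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
///
#[doc = concat!("let a = ", stringify!($atomic_type), "::new(1);")]
/// // Before: `let prev = a.compare_and_swap(1, 2, Ordering::AcqRel);`
/// // After: inspect the `Result` instead of re-comparing the returned value.
/// match a.compare_exchange(1, 2, Ordering::AcqRel, Ordering::Acquire) {
///     Ok(prev) => assert_eq!(prev, 1),
///     Err(prev) => panic!("value was {prev}, not 1"),
/// }
/// ```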
2799 | /// |
2800 | /// # Examples |
2801 | /// |
2802 | /// ``` |
2803 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2804 | /// |
2805 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2806 | /// |
2807 | /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5); |
2808 | /// assert_eq!(some_var.load(Ordering::Relaxed), 10); |
2809 | /// |
2810 | /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10); |
2811 | /// assert_eq!(some_var.load(Ordering::Relaxed), 10); |
2812 | /// ``` |
2813 | #[inline] |
2814 | #[$stable] |
#[deprecated(
since = "1.50.0",
note = "Use `compare_exchange` or `compare_exchange_weak` instead"
)]
2819 | #[$cfg_cas] |
2820 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2821 | pub fn compare_and_swap(&self, |
2822 | current: $int_type, |
2823 | new: $int_type, |
2824 | order: Ordering) -> $int_type { |
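// Delegate to `compare_exchange`, deriving the strongest failure ordering
// that is legal for the given success ordering (see the migration table in
// the documentation above).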
2825 | match self.compare_exchange(current, |
2826 | new, |
2827 | order, |
2828 | strongest_failure_ordering(order)) { |
2829 | Ok(x) => x, |
2830 | Err(x) => x, |
2831 | } |
2832 | } |
2833 | |
2834 | /// Stores a value into the atomic integer if the current value is the same as |
2835 | /// the `current` value. |
2836 | /// |
2837 | /// The return value is a result indicating whether the new value was written and |
2838 | /// containing the previous value. On success this value is guaranteed to be equal to |
2839 | /// `current`. |
2840 | /// |
2841 | /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory |
2842 | /// ordering of this operation. `success` describes the required ordering for the |
2843 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
2844 | /// `failure` describes the required ordering for the load operation that takes place when |
2845 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
2846 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
2847 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
2848 | /// |
2849 | /// **Note**: This method is only available on platforms that support atomic operations on |
2850 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2851 | /// |
2852 | /// # Examples |
2853 | /// |
2854 | /// ``` |
2855 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2856 | /// |
2857 | #[doc = concat!("let some_var = " , stringify!($atomic_type), "::new(5);" )] |
2858 | /// |
2859 | /// assert_eq!(some_var.compare_exchange(5, 10, |
2860 | /// Ordering::Acquire, |
2861 | /// Ordering::Relaxed), |
2862 | /// Ok(5)); |
2863 | /// assert_eq!(some_var.load(Ordering::Relaxed), 10); |
2864 | /// |
2865 | /// assert_eq!(some_var.compare_exchange(6, 12, |
2866 | /// Ordering::SeqCst, |
2867 | /// Ordering::Acquire), |
2868 | /// Err(10)); |
2869 | /// assert_eq!(some_var.load(Ordering::Relaxed), 10); |
2870 | /// ``` |
2871 | #[inline] |
2872 | #[$stable_cxchg] |
2873 | #[$cfg_cas] |
2874 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2875 | pub fn compare_exchange(&self, |
2876 | current: $int_type, |
2877 | new: $int_type, |
2878 | success: Ordering, |
2879 | failure: Ordering) -> Result<$int_type, $int_type> { |
2880 | // SAFETY: data races are prevented by atomic intrinsics. |
2881 | unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } |
2882 | } |
2883 | |
2884 | /// Stores a value into the atomic integer if the current value is the same as |
2885 | /// the `current` value. |
2886 | /// |
2887 | #[doc = concat!("Unlike [`" , stringify!($atomic_type), "::compare_exchange`]," )] |
2888 | /// this function is allowed to spuriously fail even |
2889 | /// when the comparison succeeds, which can result in more efficient code on some |
2890 | /// platforms. The return value is a result indicating whether the new value was |
2891 | /// written and containing the previous value. |
2892 | /// |
2893 | /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory |
2894 | /// ordering of this operation. `success` describes the required ordering for the |
2895 | /// read-modify-write operation that takes place if the comparison with `current` succeeds. |
2896 | /// `failure` describes the required ordering for the load operation that takes place when |
2897 | /// the comparison fails. Using [`Acquire`] as success ordering makes the store part |
2898 | /// of this operation [`Relaxed`], and using [`Release`] makes the successful load |
2899 | /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
2900 | /// |
2901 | /// **Note**: This method is only available on platforms that support atomic operations on |
2902 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2903 | /// |
2904 | /// # Examples |
2905 | /// |
2906 | /// ``` |
2907 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2908 | /// |
2909 | #[doc = concat!("let val = " , stringify!($atomic_type), "::new(4);" )] |
2910 | /// |
2911 | /// let mut old = val.load(Ordering::Relaxed); |
2912 | /// loop { |
2913 | /// let new = old * 2; |
2914 | /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
2915 | /// Ok(_) => break, |
2916 | /// Err(x) => old = x, |
2917 | /// } |
2918 | /// } |
2919 | /// ``` |
2920 | #[inline] |
2921 | #[$stable_cxchg] |
2922 | #[$cfg_cas] |
2923 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2924 | pub fn compare_exchange_weak(&self, |
2925 | current: $int_type, |
2926 | new: $int_type, |
2927 | success: Ordering, |
2928 | failure: Ordering) -> Result<$int_type, $int_type> { |
2929 | // SAFETY: data races are prevented by atomic intrinsics. |
2930 | unsafe { |
2931 | atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) |
2932 | } |
2933 | } |
2934 | |
2935 | /// Adds to the current value, returning the previous value. |
2936 | /// |
2937 | /// This operation wraps around on overflow. |
2938 | /// |
2939 | /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering |
2940 | /// of this operation. All ordering modes are possible. Note that using |
2941 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
2942 | /// using [`Release`] makes the load part [`Relaxed`]. |
2943 | /// |
2944 | /// **Note**: This method is only available on platforms that support atomic operations on |
2945 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2946 | /// |
2947 | /// # Examples |
2948 | /// |
2949 | /// ``` |
2950 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2951 | /// |
2952 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(0);" )] |
2953 | /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); |
2954 | /// assert_eq!(foo.load(Ordering::SeqCst), 10); |
2955 | /// ``` |
2956 | #[inline] |
2957 | #[$stable] |
2958 | #[$cfg_cas] |
2959 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2960 | pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { |
2961 | // SAFETY: data races are prevented by atomic intrinsics. |
2962 | unsafe { atomic_add(self.v.get(), val, order) } |
2963 | } |
2964 | |
2965 | /// Subtracts from the current value, returning the previous value. |
2966 | /// |
2967 | /// This operation wraps around on overflow. |
2968 | /// |
2969 | /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering |
2970 | /// of this operation. All ordering modes are possible. Note that using |
2971 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
2972 | /// using [`Release`] makes the load part [`Relaxed`]. |
2973 | /// |
2974 | /// **Note**: This method is only available on platforms that support atomic operations on |
2975 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
2976 | /// |
2977 | /// # Examples |
2978 | /// |
2979 | /// ``` |
2980 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
2981 | /// |
2982 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(20);" )] |
2983 | /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20); |
2984 | /// assert_eq!(foo.load(Ordering::SeqCst), 10); |
2985 | /// ``` |
2986 | #[inline] |
2987 | #[$stable] |
2988 | #[$cfg_cas] |
2989 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
2990 | pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { |
2991 | // SAFETY: data races are prevented by atomic intrinsics. |
2992 | unsafe { atomic_sub(self.v.get(), val, order) } |
2993 | } |
2994 | |
2995 | /// Bitwise "and" with the current value. |
2996 | /// |
2997 | /// Performs a bitwise "and" operation on the current value and the argument `val`, and |
2998 | /// sets the new value to the result. |
2999 | /// |
3000 | /// Returns the previous value. |
3001 | /// |
3002 | /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering |
3003 | /// of this operation. All ordering modes are possible. Note that using |
3004 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3005 | /// using [`Release`] makes the load part [`Relaxed`]. |
3006 | /// |
3007 | /// **Note**: This method is only available on platforms that support atomic operations on |
3008 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3009 | /// |
3010 | /// # Examples |
3011 | /// |
3012 | /// ``` |
3013 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3014 | /// |
3015 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(0b101101);" )] |
3016 | /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); |
3017 | /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); |
3018 | /// ``` |
3019 | #[inline] |
3020 | #[$stable] |
3021 | #[$cfg_cas] |
3022 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3023 | pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { |
3024 | // SAFETY: data races are prevented by atomic intrinsics. |
3025 | unsafe { atomic_and(self.v.get(), val, order) } |
3026 | } |
3027 | |
3028 | /// Bitwise "nand" with the current value. |
3029 | /// |
3030 | /// Performs a bitwise "nand" operation on the current value and the argument `val`, and |
3031 | /// sets the new value to the result. |
3032 | /// |
3033 | /// Returns the previous value. |
3034 | /// |
3035 | /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering |
3036 | /// of this operation. All ordering modes are possible. Note that using |
3037 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3038 | /// using [`Release`] makes the load part [`Relaxed`]. |
3039 | /// |
3040 | /// **Note**: This method is only available on platforms that support atomic operations on |
3041 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3042 | /// |
3043 | /// # Examples |
3044 | /// |
3045 | /// ``` |
3046 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3047 | /// |
3048 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(0x13);" )] |
3049 | /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13); |
3050 | /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); |
3051 | /// ``` |
3052 | #[inline] |
3053 | #[$stable_nand] |
3054 | #[$cfg_cas] |
3055 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3056 | pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type { |
3057 | // SAFETY: data races are prevented by atomic intrinsics. |
3058 | unsafe { atomic_nand(self.v.get(), val, order) } |
3059 | } |
3060 | |
3061 | /// Bitwise "or" with the current value. |
3062 | /// |
3063 | /// Performs a bitwise "or" operation on the current value and the argument `val`, and |
3064 | /// sets the new value to the result. |
3065 | /// |
3066 | /// Returns the previous value. |
3067 | /// |
3068 | /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering |
3069 | /// of this operation. All ordering modes are possible. Note that using |
3070 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3071 | /// using [`Release`] makes the load part [`Relaxed`]. |
3072 | /// |
3073 | /// **Note**: This method is only available on platforms that support atomic operations on |
3074 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3075 | /// |
3076 | /// # Examples |
3077 | /// |
3078 | /// ``` |
3079 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3080 | /// |
3081 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(0b101101);" )] |
3082 | /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); |
3083 | /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); |
3084 | /// ``` |
3085 | #[inline] |
3086 | #[$stable] |
3087 | #[$cfg_cas] |
3088 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3089 | pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { |
3090 | // SAFETY: data races are prevented by atomic intrinsics. |
3091 | unsafe { atomic_or(self.v.get(), val, order) } |
3092 | } |
3093 | |
3094 | /// Bitwise "xor" with the current value. |
3095 | /// |
3096 | /// Performs a bitwise "xor" operation on the current value and the argument `val`, and |
3097 | /// sets the new value to the result. |
3098 | /// |
3099 | /// Returns the previous value. |
3100 | /// |
3101 | /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering |
3102 | /// of this operation. All ordering modes are possible. Note that using |
3103 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3104 | /// using [`Release`] makes the load part [`Relaxed`]. |
3105 | /// |
3106 | /// **Note**: This method is only available on platforms that support atomic operations on |
3107 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3108 | /// |
3109 | /// # Examples |
3110 | /// |
3111 | /// ``` |
3112 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3113 | /// |
3114 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(0b101101);" )] |
3115 | /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); |
3116 | /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); |
3117 | /// ``` |
3118 | #[inline] |
3119 | #[$stable] |
3120 | #[$cfg_cas] |
3121 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3122 | pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { |
3123 | // SAFETY: data races are prevented by atomic intrinsics. |
3124 | unsafe { atomic_xor(self.v.get(), val, order) } |
3125 | } |
3126 | |
3127 | /// Fetches the value, and applies a function to it that returns an optional |
3128 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else |
3129 | /// `Err(previous_value)`. |
3130 | /// |
3131 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
3132 | /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied |
3133 | /// only once to the stored value. |
3134 | /// |
3135 | /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. |
3136 | /// The first describes the required ordering for when the operation finally succeeds while the second |
3137 | /// describes the required ordering for loads. These correspond to the success and failure orderings of |
3138 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange`]" )] |
3139 | /// respectively. |
3140 | /// |
3141 | /// Using [`Acquire`] as success ordering makes the store part |
3142 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load |
3143 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
3144 | /// |
3145 | /// **Note**: This method is only available on platforms that support atomic operations on |
3146 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3147 | /// |
3148 | /// # Considerations |
3149 | /// |
3150 | /// This method is not magic; it is not provided by the hardware. |
3151 | /// It is implemented in terms of |
3152 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange_weak`]," )] |
3153 | /// and suffers from the same drawbacks. |
3154 | /// In particular, this method will not circumvent the [ABA Problem]. |
3155 | /// |
3156 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
3157 | /// |
3158 | /// # Examples |
3159 | /// |
3160 | /// ```rust |
3161 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3162 | /// |
3163 | #[doc = concat!("let x = " , stringify!($atomic_type), "::new(7);" )] |
3164 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7)); |
3165 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7)); |
3166 | /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8)); |
3167 | /// assert_eq!(x.load(Ordering::SeqCst), 9); |
3168 | /// ``` |
3169 | #[inline] |
3170 | #[stable(feature = "no_more_cas" , since = "1.45.0" )] |
3171 | #[$cfg_cas] |
3172 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3173 | pub fn fetch_update<F>(&self, |
3174 | set_order: Ordering, |
3175 | fetch_order: Ordering, |
3176 | mut f: F) -> Result<$int_type, $int_type> |
3177 | where F: FnMut($int_type) -> Option<$int_type> { |
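// `compare_exchange_weak` may fail spuriously, so retry with the freshly
// observed value until either `f` declines (returns `None`) or the exchange
// succeeds. Note that `f` is re-invoked on every retry.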
3178 | let mut prev = self.load(fetch_order); |
3179 | while let Some(next) = f(prev) { |
3180 | match self.compare_exchange_weak(prev, next, set_order, fetch_order) { |
3181 | x @ Ok(_) => return x, |
3182 | Err(next_prev) => prev = next_prev |
3183 | } |
3184 | } |
3185 | Err(prev) |
3186 | } |
3187 | |
3188 | /// Fetches the value, and applies a function to it that returns an optional |
3189 | /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else |
3190 | /// `Err(previous_value)`. |
3191 | /// |
3192 | #[doc = concat!("See also: [`update`](`" , stringify!($atomic_type), "::update`)." )] |
3193 | /// |
3194 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
3195 | /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied |
3196 | /// only once to the stored value. |
3197 | /// |
3198 | /// `try_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. |
3199 | /// The first describes the required ordering for when the operation finally succeeds while the second |
3200 | /// describes the required ordering for loads. These correspond to the success and failure orderings of |
3201 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange`]" )] |
3202 | /// respectively. |
3203 | /// |
3204 | /// Using [`Acquire`] as success ordering makes the store part |
3205 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load |
3206 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
3207 | /// |
3208 | /// **Note**: This method is only available on platforms that support atomic operations on |
3209 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3210 | /// |
3211 | /// # Considerations |
3212 | /// |
3213 | /// This method is not magic; it is not provided by the hardware. |
3214 | /// It is implemented in terms of |
3215 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange_weak`]," )] |
3216 | /// and suffers from the same drawbacks. |
3217 | /// In particular, this method will not circumvent the [ABA Problem]. |
3218 | /// |
3219 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
3220 | /// |
3221 | /// # Examples |
3222 | /// |
3223 | /// ```rust |
3224 | /// #![feature(atomic_try_update)] |
3225 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3226 | /// |
3227 | #[doc = concat!("let x = " , stringify!($atomic_type), "::new(7);" )] |
3228 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7)); |
3229 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7)); |
3230 | /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8)); |
3231 | /// assert_eq!(x.load(Ordering::SeqCst), 9); |
3232 | /// ``` |
3233 | #[inline] |
3234 | #[unstable(feature = "atomic_try_update" , issue = "135894" )] |
3235 | #[$cfg_cas] |
3236 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3237 | pub fn try_update( |
3238 | &self, |
3239 | set_order: Ordering, |
3240 | fetch_order: Ordering, |
3241 | f: impl FnMut($int_type) -> Option<$int_type>, |
3242 | ) -> Result<$int_type, $int_type> { |
3243 | // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`; |
3244 | // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`. |
3245 | self.fetch_update(set_order, fetch_order, f) |
3246 | } |
3247 | |
/// Fetches the value, and applies a function to it that returns a new value.
3249 | /// The new value is stored and the old value is returned. |
3250 | /// |
3251 | #[doc = concat!("See also: [`try_update`](`" , stringify!($atomic_type), "::try_update`)." )] |
3252 | /// |
3253 | /// Note: This may call the function multiple times if the value has been changed from other threads in |
3254 | /// the meantime, but the function will have been applied only once to the stored value. |
3255 | /// |
3256 | /// `update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. |
3257 | /// The first describes the required ordering for when the operation finally succeeds while the second |
3258 | /// describes the required ordering for loads. These correspond to the success and failure orderings of |
3259 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange`]" )] |
3260 | /// respectively. |
3261 | /// |
3262 | /// Using [`Acquire`] as success ordering makes the store part |
3263 | /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load |
3264 | /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]. |
3265 | /// |
3266 | /// **Note**: This method is only available on platforms that support atomic operations on |
3267 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3268 | /// |
3269 | /// # Considerations |
3270 | /// |
3271 | /// This method is not magic; it is not provided by the hardware. |
3272 | /// It is implemented in terms of |
3273 | #[doc = concat!("[`" , stringify!($atomic_type), "::compare_exchange_weak`]," )] |
3274 | /// and suffers from the same drawbacks. |
3275 | /// In particular, this method will not circumvent the [ABA Problem]. |
3276 | /// |
3277 | /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem |
3278 | /// |
3279 | /// # Examples |
3280 | /// |
3281 | /// ```rust |
3282 | /// #![feature(atomic_try_update)] |
3283 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3284 | /// |
3285 | #[doc = concat!("let x = " , stringify!($atomic_type), "::new(7);" )] |
3286 | /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 7); |
3287 | /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 8); |
3288 | /// assert_eq!(x.load(Ordering::SeqCst), 9); |
3289 | /// ``` |
3290 | #[inline] |
3291 | #[unstable(feature = "atomic_try_update" , issue = "135894" )] |
3292 | #[$cfg_cas] |
3293 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3294 | pub fn update( |
3295 | &self, |
3296 | set_order: Ordering, |
3297 | fetch_order: Ordering, |
3298 | mut f: impl FnMut($int_type) -> $int_type, |
3299 | ) -> $int_type { |
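// The same retry loop as `fetch_update`, but `f` is infallible, so the
// loop can only exit through a successful exchange.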
3300 | let mut prev = self.load(fetch_order); |
3301 | loop { |
3302 | match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) { |
3303 | Ok(x) => break x, |
3304 | Err(next_prev) => prev = next_prev, |
3305 | } |
3306 | } |
3307 | } |
3308 | |
3309 | /// Maximum with the current value. |
3310 | /// |
3311 | /// Finds the maximum of the current value and the argument `val`, and |
3312 | /// sets the new value to the result. |
3313 | /// |
3314 | /// Returns the previous value. |
3315 | /// |
3316 | /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering |
3317 | /// of this operation. All ordering modes are possible. Note that using |
3318 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3319 | /// using [`Release`] makes the load part [`Relaxed`]. |
3320 | /// |
3321 | /// **Note**: This method is only available on platforms that support atomic operations on |
3322 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3323 | /// |
3324 | /// # Examples |
3325 | /// |
3326 | /// ``` |
3327 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3328 | /// |
3329 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(23);" )] |
3330 | /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23); |
3331 | /// assert_eq!(foo.load(Ordering::SeqCst), 42); |
3332 | /// ``` |
3333 | /// |
3334 | /// If you want to obtain the maximum value in one step, you can use the following: |
3335 | /// |
3336 | /// ``` |
3337 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3338 | /// |
3339 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(23);" )] |
3340 | /// let bar = 42; |
3341 | /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar); |
3342 | /// assert!(max_foo == 42); |
3343 | /// ``` |
3344 | #[inline] |
3345 | #[stable(feature = "atomic_min_max" , since = "1.45.0" )] |
3346 | #[$cfg_cas] |
3347 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3348 | pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type { |
3349 | // SAFETY: data races are prevented by atomic intrinsics. |
3350 | unsafe { $max_fn(self.v.get(), val, order) } |
3351 | } |
3352 | |
3353 | /// Minimum with the current value. |
3354 | /// |
3355 | /// Finds the minimum of the current value and the argument `val`, and |
3356 | /// sets the new value to the result. |
3357 | /// |
3358 | /// Returns the previous value. |
3359 | /// |
3360 | /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering |
3361 | /// of this operation. All ordering modes are possible. Note that using |
3362 | /// [`Acquire`] makes the store part of this operation [`Relaxed`], and |
3363 | /// using [`Release`] makes the load part [`Relaxed`]. |
3364 | /// |
3365 | /// **Note**: This method is only available on platforms that support atomic operations on |
3366 | #[doc = concat!("[`" , $s_int_type, "`]." )] |
3367 | /// |
3368 | /// # Examples |
3369 | /// |
3370 | /// ``` |
3371 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3372 | /// |
3373 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(23);" )] |
3374 | /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23); |
3375 | /// assert_eq!(foo.load(Ordering::Relaxed), 23); |
3376 | /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23); |
3377 | /// assert_eq!(foo.load(Ordering::Relaxed), 22); |
3378 | /// ``` |
3379 | /// |
3380 | /// If you want to obtain the minimum value in one step, you can use the following: |
3381 | /// |
3382 | /// ``` |
3383 | #[doc = concat!($extra_feature, "use std::sync::atomic::{" , stringify!($atomic_type), ", Ordering};" )] |
3384 | /// |
3385 | #[doc = concat!("let foo = " , stringify!($atomic_type), "::new(23);" )] |
3386 | /// let bar = 12; |
3387 | /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar); |
3388 | /// assert_eq!(min_foo, 12); |
3389 | /// ``` |
3390 | #[inline] |
3391 | #[stable(feature = "atomic_min_max" , since = "1.45.0" )] |
3392 | #[$cfg_cas] |
3393 | #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces |
3394 | pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type { |
3395 | // SAFETY: data races are prevented by atomic intrinsics. |
3396 | unsafe { $min_fn(self.v.get(), val, order) } |
3397 | } |
3398 | |
3399 | /// Returns a mutable pointer to the underlying integer. |
3400 | /// |
3401 | /// Doing non-atomic reads and writes on the resulting integer can be a data race. |
3402 | /// This method is mostly useful for FFI, where the function signature may use |
3403 | #[doc = concat!("`*mut " , stringify!($int_type), "` instead of `&" , stringify!($atomic_type), "`." )] |
3404 | /// |
3405 | /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the |
3406 | /// atomic types work with interior mutability. All modifications of an atomic change the value |
3407 | /// through a shared reference, and can do so safely as long as they use atomic operations. Any |
3408 | /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same |
3409 | /// restriction: operations on it must be atomic. |
3410 | /// |
3411 | /// # Examples |
3412 | /// |
3413 | /// ```ignore (extern-declaration) |
3414 | /// # fn main() { |
3415 | #[doc = concat!($extra_feature, "use std::sync::atomic::" , stringify!($atomic_type), ";" )] |
3416 | /// |
3417 | /// extern "C" { |
3418 | #[doc = concat!(" fn my_atomic_op(arg: *mut " , stringify!($int_type), ");" )] |
3419 | /// } |
3420 | /// |
3421 | #[doc = concat!("let atomic = " , stringify!($atomic_type), "::new(1);" )] |
3422 | /// |
3423 | /// // SAFETY: Safe as long as `my_atomic_op` is atomic. |
3424 | /// unsafe { |
3425 | /// my_atomic_op(atomic.as_ptr()); |
3426 | /// } |
3427 | /// # } |
3428 | /// ``` |
3429 | #[inline] |
3430 | #[stable(feature = "atomic_as_ptr" , since = "1.70.0" )] |
3431 | #[rustc_const_stable(feature = "atomic_as_ptr" , since = "1.70.0" )] |
3432 | #[rustc_never_returns_null_ptr] |
3433 | pub const fn as_ptr(&self) -> *mut $int_type { |
3434 | self.v.get() |
3435 | } |
3436 | } |
3437 | } |
3438 | } |
3439 | |
#[cfg(target_has_atomic_load_store = "8")]
atomic_int! {
cfg(target_has_atomic = "8"),
cfg(target_has_atomic_equal_alignment = "8"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicI8",
"i8",
"",
atomic_min, atomic_max,
1,
i8 AtomicI8
}
#[cfg(target_has_atomic_load_store = "8")]
atomic_int! {
cfg(target_has_atomic = "8"),
cfg(target_has_atomic_equal_alignment = "8"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicU8",
"u8",
"",
atomic_umin, atomic_umax,
1,
u8 AtomicU8
}
#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
cfg(target_has_atomic = "16"),
cfg(target_has_atomic_equal_alignment = "16"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicI16",
"i16",
"",
atomic_min, atomic_max,
2,
i16 AtomicI16
}
#[cfg(target_has_atomic_load_store = "16")]
atomic_int! {
cfg(target_has_atomic = "16"),
cfg(target_has_atomic_equal_alignment = "16"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicU16",
"u16",
"",
atomic_umin, atomic_umax,
2,
u16 AtomicU16
}
#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
cfg(target_has_atomic = "32"),
cfg(target_has_atomic_equal_alignment = "32"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicI32",
"i32",
"",
atomic_min, atomic_max,
4,
i32 AtomicI32
}
#[cfg(target_has_atomic_load_store = "32")]
atomic_int! {
cfg(target_has_atomic = "32"),
cfg(target_has_atomic_equal_alignment = "32"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicU32",
"u32",
"",
atomic_umin, atomic_umax,
4,
u32 AtomicU32
}
#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
cfg(target_has_atomic = "64"),
cfg(target_has_atomic_equal_alignment = "64"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicI64",
"i64",
"",
atomic_min, atomic_max,
8,
i64 AtomicI64
}
#[cfg(target_has_atomic_load_store = "64")]
atomic_int! {
cfg(target_has_atomic = "64"),
cfg(target_has_atomic_equal_alignment = "64"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
stable(feature = "integer_atomics_stable", since = "1.34.0"),
rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicU64",
"u64",
"",
atomic_umin, atomic_umax,
8,
u64 AtomicU64
}
#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
cfg(target_has_atomic = "128"),
cfg(target_has_atomic_equal_alignment = "128"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
rustc_diagnostic_item = "AtomicI128",
"i128",
"#![feature(integer_atomics)]\n\n",
atomic_min, atomic_max,
16,
i128 AtomicI128
}
#[cfg(target_has_atomic_load_store = "128")]
atomic_int! {
cfg(target_has_atomic = "128"),
cfg(target_has_atomic_equal_alignment = "128"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
rustc_diagnostic_item = "AtomicU128",
"u128",
"#![feature(integer_atomics)]\n\n",
atomic_umin, atomic_umax,
16,
u128 AtomicU128
}
3630 | |
#[cfg(target_has_atomic_load_store = "ptr")]
macro_rules! atomic_int_ptr_sized {
( $($target_pointer_width:literal $align:literal)* ) => { $(
#[cfg(target_pointer_width = $target_pointer_width)]
atomic_int! {
cfg(target_has_atomic = "ptr"),
cfg(target_has_atomic_equal_alignment = "ptr"),
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicIsize",
"isize",
"",
atomic_min, atomic_max,
$align,
isize AtomicIsize
}
#[cfg(target_pointer_width = $target_pointer_width)]
atomic_int! {
cfg(target_has_atomic = "ptr"),
cfg(target_has_atomic_equal_alignment = "ptr"),
stable(feature = "rust1", since = "1.0.0"),
stable(feature = "extended_compare_and_swap", since = "1.10.0"),
stable(feature = "atomic_debug", since = "1.3.0"),
stable(feature = "atomic_access", since = "1.15.0"),
stable(feature = "atomic_from", since = "1.23.0"),
stable(feature = "atomic_nand", since = "1.27.0"),
rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
rustc_diagnostic_item = "AtomicUsize",
"usize",
"",
atomic_umin, atomic_umax,
$align,
usize AtomicUsize
}

/// An [`AtomicIsize`] initialized to `0`.
#[cfg(target_pointer_width = $target_pointer_width)]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
since = "1.34.0",
note = "the `new` function is now preferred",
suggestion = "AtomicIsize::new(0)",
)]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);

/// An [`AtomicUsize`] initialized to `0`.
#[cfg(target_pointer_width = $target_pointer_width)]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
since = "1.34.0",
note = "the `new` function is now preferred",
suggestion = "AtomicUsize::new(0)",
)]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
)* };
}
3694 | |
#[cfg(target_has_atomic_load_store = "ptr")]
atomic_int_ptr_sized! {
    "16" 2
    "32" 4
    "64" 8
}
3701 | |
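/// Derives the strongest failure ordering that a compare-exchange permits for a given
/// success ordering: failure orderings may not be `Release` or `AcqRel`, so any
/// release half is dropped.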
#[inline]
#[cfg(target_has_atomic)]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
3713 | |
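/// Performs a raw atomic store. `Acquire` and `AcqRel` are rejected because a store has
/// no read half that could acquire.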
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_store_relaxed(dst, val),
            Release => intrinsics::atomic_store_release(dst, val),
            SeqCst => intrinsics::atomic_store_seqcst(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire-release store"),
        }
    }
}
3728 | |
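/// Performs a raw atomic load. `Release` and `AcqRel` are rejected because a load has
/// no write half that could release.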
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_load_relaxed(dst),
            Acquire => intrinsics::atomic_load_acquire(dst),
            SeqCst => intrinsics::atomic_load_seqcst(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire-release load"),
        }
    }
}
3743 | |
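/// Atomically replaces the value at `dst`, returning the previous value.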
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
            Acquire => intrinsics::atomic_xchg_acquire(dst, val),
            Release => intrinsics::atomic_xchg_release(dst, val),
            AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xchg_seqcst(dst, val),
        }
    }
}
3759 | |
/// Returns the previous value (like `__sync_fetch_and_add`).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
            Acquire => intrinsics::atomic_xadd_acquire(dst, val),
            Release => intrinsics::atomic_xadd_release(dst, val),
            AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xadd_seqcst(dst, val),
        }
    }
}
3776 | |
/// Returns the previous value (like `__sync_fetch_and_sub`).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
            Acquire => intrinsics::atomic_xsub_acquire(dst, val),
            Release => intrinsics::atomic_xsub_release(dst, val),
            AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
            SeqCst => intrinsics::atomic_xsub_seqcst(dst, val),
        }
    }
}
3793 | |
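/// Dispatches a strong compare-exchange to the intrinsic monomorphized for the given
/// (success, failure) ordering pair. A failed exchange performs only a load, which is
/// why `Release` and `AcqRel` failure orderings are rejected below.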
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
            (Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
            (Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
            (Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
            (Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
            (Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
            (Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
            (AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
3828 | |
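/// Like `atomic_compare_exchange`, but maps to the `cxchgweak` intrinsics, which may
/// fail spuriously even when the comparison succeeds.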
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, new),
            (Relaxed, Acquire) => intrinsics::atomic_cxchgweak_relaxed_acquire(dst, old, new),
            (Relaxed, SeqCst) => intrinsics::atomic_cxchgweak_relaxed_seqcst(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, new),
            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acquire_acquire(dst, old, new),
            (Acquire, SeqCst) => intrinsics::atomic_cxchgweak_acquire_seqcst(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchgweak_release_relaxed(dst, old, new),
            (Release, Acquire) => intrinsics::atomic_cxchgweak_release_acquire(dst, old, new),
            (Release, SeqCst) => intrinsics::atomic_cxchgweak_release_seqcst(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel_acquire(dst, old, new),
            (AcqRel, SeqCst) => intrinsics::atomic_cxchgweak_acqrel_seqcst(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_seqcst_acquire(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak_seqcst_seqcst(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
3863 | |
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_and_relaxed(dst, val),
            Acquire => intrinsics::atomic_and_acquire(dst, val),
            Release => intrinsics::atomic_and_release(dst, val),
            AcqRel => intrinsics::atomic_and_acqrel(dst, val),
            SeqCst => intrinsics::atomic_and_seqcst(dst, val),
        }
    }
}
3879 | |
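/// Returns the previous value after storing `!(old & val)` (bitwise NAND).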
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
            Acquire => intrinsics::atomic_nand_acquire(dst, val),
            Release => intrinsics::atomic_nand_release(dst, val),
            AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
            SeqCst => intrinsics::atomic_nand_seqcst(dst, val),
        }
    }
}
3895 | |
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_or_seqcst(dst, val),
            Acquire => intrinsics::atomic_or_acquire(dst, val),
            Release => intrinsics::atomic_or_release(dst, val),
            AcqRel => intrinsics::atomic_or_acqrel(dst, val),
            Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        }
    }
}
3911 | |
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_xor_seqcst(dst, val),
            Acquire => intrinsics::atomic_xor_acquire(dst, val),
            Release => intrinsics::atomic_xor_release(dst, val),
            AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
            Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        }
    }
}
3927 | |
/// Stores the signed maximum of the current and provided value, returning the previous value.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_max_relaxed(dst, val),
            Acquire => intrinsics::atomic_max_acquire(dst, val),
            Release => intrinsics::atomic_max_release(dst, val),
            AcqRel => intrinsics::atomic_max_acqrel(dst, val),
            SeqCst => intrinsics::atomic_max_seqcst(dst, val),
        }
    }
}
3944 | |
/// Stores the signed minimum of the current and provided value, returning the previous value.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_min_relaxed(dst, val),
            Acquire => intrinsics::atomic_min_acquire(dst, val),
            Release => intrinsics::atomic_min_release(dst, val),
            AcqRel => intrinsics::atomic_min_acqrel(dst, val),
            SeqCst => intrinsics::atomic_min_seqcst(dst, val),
        }
    }
}
3961 | |
/// Stores the unsigned maximum of the current and provided value, returning the previous value.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
            Acquire => intrinsics::atomic_umax_acquire(dst, val),
            Release => intrinsics::atomic_umax_release(dst, val),
            AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
            SeqCst => intrinsics::atomic_umax_seqcst(dst, val),
        }
    }
}
3978 | |
/// Stores the unsigned minimum of the current and provided value, returning the previous value.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
            Acquire => intrinsics::atomic_umin_acquire(dst, val),
            Release => intrinsics::atomic_umin_release(dst, val),
            AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
            SeqCst => intrinsics::atomic_umin_seqcst(dst, val),
        }
    }
}
3995 | |
3996 | /// An atomic fence. |
3997 | /// |
3998 | /// Fences create synchronization between themselves and atomic operations or fences in other |
3999 | /// threads. To achieve this, a fence prevents the compiler and CPU from reordering certain types of |
4000 | /// memory operations around it. |
4001 | /// |
/// A fence 'A' which has (at least) [`Release`] ordering semantics synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics if and only if there
/// exist operations X and Y, both operating on some atomic object 'm', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to m. This provides a happens-before dependence between A and B.
4007 | /// |
/// ```text
///     Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// m.store(3, Relaxed); X ---------    |
///                                 |   |
///                                 |   |
///                                 -------------> Y  if m.load(Relaxed) == 3 {
///                                      |-------> B      fence(Acquire);
///                                                            ...
///                                                        }
/// ```
4020 | /// |
4021 | /// Note that in the example above, it is crucial that the accesses to `m` are atomic. Fences cannot |
4022 | /// be used to establish synchronization among non-atomic accesses in different threads. However, |
4023 | /// thanks to the happens-before relationship between A and B, any non-atomic accesses that |
4024 | /// happen-before A are now also properly synchronized with any non-atomic accesses that |
4025 | /// happen-after B. |
4026 | /// |
4027 | /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize |
4028 | /// with a fence. |
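///
/// For example, a [`Release`] store can synchronize with an [`Acquire`] fence placed
/// after the load that observes it. A minimal sketch of that variant of the pattern
/// (both halves shown in one thread only to keep the example self-contained; they
/// would normally run on different threads):
///
/// ```
/// use std::sync::atomic::{fence, AtomicUsize, Ordering};
///
/// static FLAG: AtomicUsize = AtomicUsize::new(0);
///
/// // Writer: the release store publishes all writes sequenced before it.
/// FLAG.store(1, Ordering::Release);
///
/// // Reader: once the relaxed load observes the store, the acquire fence
/// // synchronizes-with it, making the writer's earlier writes visible.
/// if FLAG.load(Ordering::Relaxed) == 1 {
///     fence(Ordering::Acquire);
/// }
/// ```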
4029 | /// |
4030 | /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`] |
4031 | /// and [`Release`] semantics, participates in the global program order of the |
4032 | /// other [`SeqCst`] operations and/or fences. |
4033 | /// |
4034 | /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings. |
4035 | /// |
4036 | /// # Panics |
4037 | /// |
4038 | /// Panics if `order` is [`Relaxed`]. |
4039 | /// |
4040 | /// # Examples |
4041 | /// |
4042 | /// ``` |
4043 | /// use std::sync::atomic::AtomicBool; |
4044 | /// use std::sync::atomic::fence; |
4045 | /// use std::sync::atomic::Ordering; |
4046 | /// |
/// // A mutual exclusion primitive based on a spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self
///             .flag
///             .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
///             .is_err()
///         {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
4074 | /// ``` |
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acquire(),
            Release => intrinsics::atomic_fence_release(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence_seqcst(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}
4091 | |
4092 | /// A "compiler-only" atomic fence. |
4093 | /// |
4094 | /// Like [`fence`], this function establishes synchronization with other atomic operations and |
4095 | /// fences. However, unlike [`fence`], `compiler_fence` only establishes synchronization with |
4096 | /// operations *in the same thread*. This may at first sound rather useless, since code within a |
4097 | /// thread is typically already totally ordered and does not need any further synchronization. |
4098 | /// However, there are cases where code can run on the same thread without being ordered: |
4099 | /// - The most common case is that of a *signal handler*: a signal handler runs in the same thread |
4100 | /// as the code it interrupted, but it is not ordered with respect to that code. `compiler_fence` |
4101 | /// can be used to establish synchronization between a thread and its signal handler, the same way |
4102 | /// that `fence` can be used to establish synchronization across threads. |
4103 | /// - Similar situations can arise in embedded programming with interrupt handlers, or in custom |
4104 | /// implementations of preemptive green threads. In general, `compiler_fence` can establish |
4105 | /// synchronization with code that is guaranteed to run on the same hardware CPU. |
4106 | /// |
4107 | /// See [`fence`] for how a fence can be used to achieve synchronization. Note that just like |
4108 | /// [`fence`], synchronization still requires atomic operations to be used in both threads -- it is |
4109 | /// not possible to perform synchronization entirely with fences and non-atomic operations. |
4110 | /// |
4111 | /// `compiler_fence` does not emit any machine code, but restricts the kinds of memory re-ordering |
4112 | /// the compiler is allowed to do. `compiler_fence` corresponds to [`atomic_signal_fence`] in C and |
4113 | /// C++. |
4114 | /// |
4115 | /// [`atomic_signal_fence`]: https://en.cppreference.com/w/cpp/atomic/atomic_signal_fence |
4116 | /// |
4117 | /// # Panics |
4118 | /// |
4119 | /// Panics if `order` is [`Relaxed`]. |
4120 | /// |
4121 | /// # Examples |
4122 | /// |
4123 | /// Without the two `compiler_fence` calls, the read of `IMPORTANT_VARIABLE` in `signal_handler` |
4124 | /// is *undefined behavior* due to a data race, despite everything happening in a single thread. |
4125 | /// This is because the signal handler is considered to run concurrently with its associated |
4126 | /// thread, and explicit synchronization is required to pass data between a thread and its |
4127 | /// signal handler. The code below uses two `compiler_fence` calls to establish the usual |
4128 | /// release-acquire synchronization pattern (see [`fence`] for an image). |
4129 | /// |
4130 | /// ``` |
4131 | /// use std::sync::atomic::AtomicBool; |
4132 | /// use std::sync::atomic::Ordering; |
4133 | /// use std::sync::atomic::compiler_fence; |
4134 | /// |
4135 | /// static mut IMPORTANT_VARIABLE: usize = 0; |
4136 | /// static IS_READY: AtomicBool = AtomicBool::new(false); |
4137 | /// |
/// fn main() {
///     unsafe { IMPORTANT_VARIABLE = 42 };
///     // Marks earlier writes as being released with future relaxed stores.
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         // Acquires writes that were released with relaxed stores that we read from.
///         compiler_fence(Ordering::Acquire);
///         assert_eq!(unsafe { IMPORTANT_VARIABLE }, 42);
///     }
/// }
4152 | /// ``` |
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
#[rustc_diagnostic_item = "compiler_fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acquire(),
            Release => intrinsics::atomic_singlethreadfence_release(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence_seqcst(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        }
    }
}
4169 | |
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}
4177 | |
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}
4185 | |
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::Relaxed), f)
    }
}
4193 | |
4194 | /// Signals the processor that it is inside a busy-wait spin-loop ("spin lock"). |
4195 | /// |
4196 | /// This function is deprecated in favor of [`hint::spin_loop`]. |
4197 | /// |
4198 | /// [`hint::spin_loop`]: crate::hint::spin_loop |
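///
/// A minimal migration sketch:
///
/// ```
/// // Call the stable replacement directly instead of this deprecated shim:
/// std::hint::spin_loop();
/// ```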
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
pub fn spin_loop_hint() {
    spin_loop()
}
4205 | |