// Necessary for implementing atomic methods for `AtomicUnit`
#![allow(clippy::unit_arg)]

use crate::primitive::sync::atomic::{self, Ordering};
use core::cell::UnsafeCell;
use core::cmp;
use core::fmt;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr;

use super::seq_lock::SeqLock;

/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
///
/// [`Cell`]: std::cell::Cell
/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
/// [`Release`]: std::sync::atomic::Ordering::Release
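///
/// # Examples
///
/// A minimal sketch of sharing an `AtomicCell` between threads. This example assumes
/// `std::thread::scope` (stable since Rust 1.63); any other way of sharing a reference across
/// threads works just as well.
///
/// ```
/// use crossbeam_utils::atomic::AtomicCell;
///
/// let a = AtomicCell::new(0i32);
///
/// std::thread::scope(|s| {
///     // Both closures only borrow `a`; `AtomicCell<i32>` is `Sync`, so this is allowed.
///     s.spawn(|| a.store(1));
///     s.spawn(|| a.fetch_add(2));
/// });
///
/// // Both threads have finished. Depending on which operation was applied last,
/// // the cell now holds either 1 or 3.
/// assert!(a.load() == 1 || a.load() == 3);
/// ```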
#[repr(transparent)]
pub struct AtomicCell<T> {
    /// The inner value.
    ///
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    ///
    /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state:
    /// <https://github.com/crossbeam-rs/crossbeam/issues/833>
    ///
    /// Note:
    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
    /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>.
    value: UnsafeCell<MaybeUninit<T>>,
}

unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

impl<T> UnwindSafe for AtomicCell<T> {}
impl<T> RefUnwindSafe for AtomicCell<T> {}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub const fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(MaybeUninit::new(val)),
        }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);
        // SAFETY:
        // - passing `self` by value guarantees that no other threads are concurrently
        //   accessing the atomic data
        // - the raw pointer passed in is valid because we got it from an owned value.
        // - `ManuallyDrop` prevents double dropping `T`
        unsafe { this.as_ptr().read() }
    }

    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub const fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }

    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.as_ptr(), val);
            }
        }
    }

    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.as_ptr(), val) }
    }

    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    ///
    /// let ptr = a.as_ptr();
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.value.get().cast::<T>()
    }
}

impl<T: Default> AtomicCell<T> {
    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// let five = a.take();
    ///
    /// assert_eq!(five, 5);
    /// assert_eq!(a.into_inner(), 0);
    /// ```
    pub fn take(&self) -> T {
        self.swap(Default::default())
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.as_ptr()) }
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    // TODO: remove in the next major version.
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed by other
    /// threads in the meantime, as long as the function returns `Some(_)`, but the function will
    /// have been applied only once to the stored value.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.fetch_update(|_| None), Err(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
    /// assert_eq!(a.load(), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load();
        while let Some(next) = f(prev) {
            match self.compare_exchange(prev, next) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
// for `AtomicCell` to avoid leaks of non-`Copy` types.
impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        if mem::needs_drop::<T>() {
            // SAFETY:
            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
            // - the raw pointer passed in is valid because we got it from a reference
            // - `MaybeUninit` prevents double dropping `T`
            unsafe {
                self.as_ptr().drop_in_place();
            }
        }
    }
}

macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);

            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
            #[cfg(target_has_atomic = "64")]
            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            // TODO: AtomicU128 is unstable
            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);

            break $fallback_op;
        }
    };
}
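
// Illustrative sketch (not the literal expansion): an invocation such as
// `atomic!(u32, a, { /* atomic op using `a` */ }, { /* fallback op */ })` expands to roughly the
// following, where the first matching `can_transmute` check breaks out of the loop with the
// atomic operation and the final `break` runs the lock-based fallback:
//
//     loop {
//         if can_transmute::<u32, AtomicUnit>() {
//             let a: &AtomicUnit;
//             break { /* atomic op using `a` */ };
//         }
//         if can_transmute::<u32, atomic::AtomicU8>() {
//             let a: &atomic::AtomicU8;
//             break { /* atomic op using `a` */ };
//         }
//         // ... AtomicU16, AtomicU32, AtomicU64 ...
//         break { /* fallback op */ };
//     }
//
// Since `can_transmute` is a `const fn` that depends only on the types involved, the compiler is
// expected to reduce this to a single branch.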

macro_rules! impl_arithmetic {
    ($t:ty, fallback, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_add(val);
                old
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_sub(val);
                old
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(2), 7);
            /// assert_eq!(a.load(), 7);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::max(old, val);
                old
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::min(old, val);
                old
            }
        }
    };
    ($t:ty, $atomic:ident, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_add(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = value.wrapping_add(val);
                        old
                    }
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_sub(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = value.wrapping_sub(val);
                        old
                    }
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_and(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value &= val;
                        old
                    }
                }
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_nand(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = !(old & val);
                        old
                    }
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_or(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value |= val;
                        old
                    }
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_xor(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value ^= val;
                        old
                    }
                }
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(9), 7);
            /// assert_eq!(a.load(), 9);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_max(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = cmp::max(old, val);
                        old
                    }
                }
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                atomic! {
                    $t, _a,
                    {
                        let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) };
                        a.fetch_min(val, Ordering::AcqRel)
                    },
                    {
                        let _guard = lock(self.as_ptr() as usize).write();
                        let value = unsafe { &mut *(self.as_ptr()) };
                        let old = *value;
                        *value = cmp::min(old, val);
                        old
                    }
                }
            }
        }
    };
}

impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);");

impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);");

#[cfg(target_has_atomic = "64")]
impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(target_has_atomic = "64")]
impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(not(target_has_atomic = "64"))]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");

// TODO: AtomicU128 is unstable
// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);");
// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");

impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);");
impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);");

impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_and(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }
        }
    }

    /// Applies logical "nand" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_nand(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_nand(true), true);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_nand(false), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_nand(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }
        }
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_or(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }
        }
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        atomic! {
            bool, _a,
            {
                let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) };
                a.fetch_xor(val, Ordering::AcqRel)
            },
            {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }
        }
    }
}

impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}

impl<T> From<T> for AtomicCell<T> {
    #[inline]
    fn from(val: T) -> AtomicCell<T> {
        AtomicCell::new(val)
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}

/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
const fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, and the alignment of `A` must be greater than or equal to that of `B`.
    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}
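
// As an illustration (target-dependent, not an exhaustive rule): `can_transmute::<usize,
// atomic::AtomicUsize>()` is true on targets where the two types have identical size and
// alignment, while `can_transmute::<[u8; 4], atomic::AtomicU32>()` is false because `[u8; 4]`
// only has an alignment of 1, which is smaller than that of `AtomicU32`.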

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static SeqLock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
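    //
    // As a concrete (purely illustrative) sketch of the problem: with 8-byte-aligned addresses,
    // the sequence 0, 8, 16, 24, ... taken modulo 96 only ever produces multiples of 8
    // (gcd(8, 96) = 8), i.e. just 12 of the 96 slots, whereas modulo 97 it eventually cycles
    // through all 97 residues because gcd(8, 97) = 1.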
    const LEN: usize = 97;
    #[allow(clippy::declare_interior_mutable_const)]
    const L: SeqLock = SeqLock::new();
    static LOCKS: [SeqLock; LEN] = [L; LEN];

    // If the modulus is a constant number, the compiler will use crazy math to transform this into
    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
    &LOCKS[addr % LEN]
}

/// An atomic `()`.
///
/// All operations are noops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}

/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
const fn atomic_is_lock_free<T>() -> bool {
    atomic! { T, _a, true, false }
}

/// Atomically reads data from `src`.
///
/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::Acquire))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write all
                // kinds of data since `AtomicU8` is not available on stable Rust yet.
                // Load as `MaybeUninit` because we may load a value that is not valid as `T`.
                let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>());

                if lock.validate_read(stamp) {
                    return val.assume_init();
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}

/// Atomically writes `val` to `dst`.
///
/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            a.store(mem::transmute_copy(&val), Ordering::Release);
            mem::forget(val);
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val);
        }
    }
}

/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}

/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
#[allow(clippy::let_unit_value)]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
    T: Copy + Eq,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
            let new_raw = mem::transmute_copy(&new);

            loop {
                match a.compare_exchange_weak(
                    current_raw,
                    new_raw,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break Ok(current),
                    Err(previous_raw) => {
                        let previous = mem::transmute_copy(&previous_raw);

                        if !T::eq(&previous, &current) {
                            break Err(previous);
                        }

                        // The compare-exchange operation has failed and didn't store `new`. The
                        // failure is either spurious, or `previous` was semantically equal to
                        // `current` but not byte-equal. Let's retry with `previous` as the new
                        // `current`.
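                        // (For instance, and purely as an illustration: when `T` contains padding
                        // bytes, two values can compare equal via `Eq` while their raw byte
                        // representations, and thus the transmuted integers, differ.)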
                        current = previous;
                        current_raw = previous_raw;
                    }
                }
            }
        },
        {
            let guard = lock(dst as usize).write();

            if T::eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}