// Necessary for implementing atomic methods for `AtomicUnit`
#![allow(clippy::unit_arg)]

use crate::primitive::sync::atomic::{self, AtomicBool};
use core::cell::UnsafeCell;
use core::cmp;
use core::fmt;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ptr;
use core::sync::atomic::Ordering;

#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};

use super::seq_lock::SeqLock;

/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
///
/// [`Cell`]: std::cell::Cell
/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
/// [`Release`]: std::sync::atomic::Ordering::Release
#[repr(transparent)]
pub struct AtomicCell<T> {
    /// The inner value.
    ///
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    ///
    /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state:
    /// <https://github.com/crossbeam-rs/crossbeam/issues/833>
    ///
    /// Note:
    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
    /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>.
    value: UnsafeCell<MaybeUninit<T>>,
}

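// Note: `T: Send` suffices for both impls. An `AtomicCell` never hands out references to the
// inner value; it only moves or copies values of `T` in and out, so sharing the cell across
// threads only requires that `T` itself can be sent between threads.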
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

#[cfg(feature = "std")]
impl<T> UnwindSafe for AtomicCell<T> {}
#[cfg(feature = "std")]
impl<T> RefUnwindSafe for AtomicCell<T> {}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub const fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(MaybeUninit::new(val)),
        }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);
        // SAFETY:
        // - passing `self` by value guarantees that no other threads are concurrently
        //   accessing the atomic data
        // - the raw pointer passed in is valid because we got it from an owned value
        // - `ManuallyDrop` prevents double dropping `T`
        unsafe { this.as_ptr().read() }
    }

    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub const fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }

    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.as_ptr(), val);
            }
        }
    }

    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.as_ptr(), val) }
    }

    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    ///
    /// let ptr = a.as_ptr();
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.value.get().cast::<T>()
    }
}

impl<T: Default> AtomicCell<T> {
    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// let five = a.take();
    ///
    /// assert_eq!(five, 5);
    /// assert_eq!(a.into_inner(), 0);
    /// ```
    pub fn take(&self) -> T {
        self.swap(Default::default())
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.as_ptr()) }
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    // TODO: remove in the next major version.
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
    /// `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other threads in
    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
    /// only once to the stored value.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.fetch_update(|_| None), Err(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
    /// assert_eq!(a.load(), 9);
    /// ```
    #[inline]
    pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load();
        while let Some(next) = f(prev) {
            match self.compare_exchange(prev, next) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}

// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
// for `AtomicCell` to avoid leaks of non-`Copy` types.
impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        if mem::needs_drop::<T>() {
            // SAFETY:
            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
            // - the raw pointer passed in is valid because we got it from a reference
            // - `MaybeUninit` prevents double dropping `T`
            unsafe {
                self.as_ptr().drop_in_place();
            }
        }
    }
}

macro_rules! impl_arithmetic {
    ($t:ty, fallback, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_add(val);
                old
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_sub(val);
                old
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(2), 7);
            /// assert_eq!(a.load(), 7);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::max(old, val);
                old
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::min(old, val);
                old
            }
        }
    };
    ($t:ty, $atomic:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_add(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_add(val);
                    old
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_sub(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_sub(val);
                    old
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_and(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value &= val;
                    old
                }
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_nand(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = !(old & val);
                    old
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_or(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value |= val;
                    old
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_xor(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value ^= val;
                    old
                }
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(9), 7);
            /// assert_eq!(a.load(), 9);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    // TODO: Atomic*::fetch_max requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::max(old, val))).unwrap()
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::max(old, val);
                    old
                }
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    // TODO: Atomic*::fetch_min requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::min(old, val))).unwrap()
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::min(old, val);
                    old
                }
            }
        }
    };
}

impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
// TODO: AtomicU128 is unstable
// impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
// impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");

impl_arithmetic!(
    usize,
    atomic::AtomicUsize,
    "let a = AtomicCell::new(7usize);"
);
impl_arithmetic!(
    isize,
    atomic::AtomicIsize,
    "let a = AtomicCell::new(7isize);"
);

impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_and(val, Ordering::AcqRel)
    }

    /// Applies logical "nand" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_nand(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_nand(true), true);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_nand(false), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_nand(val, Ordering::AcqRel)
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_or(val, Ordering::AcqRel)
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_xor(val, Ordering::AcqRel)
    }
}

impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}

impl<T> From<T> for AtomicCell<T> {
    #[inline]
    fn from(val: T) -> AtomicCell<T> {
        AtomicCell::new(val)
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}

/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
const fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, but alignment of `A` must be greater than or equal to that of `B`.
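    // For example, `can_transmute::<usize, AtomicUsize>()` holds on typical platforms, where the
    // two types have the same size and `usize` is at least as aligned as `AtomicUsize`; that is
    // what lets `AtomicCell<usize>` be backed by `AtomicUsize` operations.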
    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static SeqLock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
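    //
    // As a concrete illustration: two cells at addresses 4096 and 4104 map to
    // `LOCKS[4096 % 97] == LOCKS[22]` and `LOCKS[4104 % 97] == LOCKS[30]`, so operations on them
    // take different locks and don't contend with each other.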
    const LEN: usize = 97;
    #[allow(clippy::declare_interior_mutable_const)]
    const L: SeqLock = SeqLock::new();
    static LOCKS: [SeqLock; LEN] = [L; LEN];

    // If the modulus is a constant number, the compiler will use crazy math to transform this into
    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
    &LOCKS[addr % LEN]
}

/// An atomic `()`.
///
/// All operations are noops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}

macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);

            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
            #[cfg(not(crossbeam_no_atomic_64))]
            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            // TODO: AtomicU128 is unstable
            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);

            break $fallback_op;
        }
    };
}

/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
const fn atomic_is_lock_free<T>() -> bool {
    // HACK(taiki-e): This is equivalent to `atomic! { T, _a, true, false }`, but can be used in
    // const fn even in our MSRV (Rust 1.38).
    let is_lock_free: bool = can_transmute::<T, AtomicUnit>()
        | can_transmute::<T, atomic::AtomicU8>()
        | can_transmute::<T, atomic::AtomicU16>()
        | can_transmute::<T, atomic::AtomicU32>();
    #[cfg(not(crossbeam_no_atomic_64))]
    let is_lock_free: bool = is_lock_free | can_transmute::<T, atomic::AtomicU64>();
    // TODO: AtomicU128 is unstable
    // let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU128>();
    is_lock_free
}

/// Atomically reads data from `src`.
///
/// This operation uses the `Acquire` ordering. If possible, atomic instructions are used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::Acquire))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write all
                // kinds of data since `AtomicU8` is not available on stable Rust yet.
                // Load as `MaybeUninit` because we may load a value that is not valid as `T`.
                let val = ptr::read_volatile(src.cast::<MaybeUninit<T>>());

                if lock.validate_read(stamp) {
                    return val.assume_init();
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}

/// Atomically writes `val` to `dst`.
///
/// This operation uses the `Release` ordering. If possible, atomic instructions are used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            a.store(mem::transmute_copy(&val), Ordering::Release);
            mem::forget(val);
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val);
        }
    }
}

/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `AcqRel` ordering. If possible, atomic instructions are used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}

/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `AcqRel` ordering. If possible, atomic instructions are used, and a
/// global lock otherwise.
#[allow(clippy::let_unit_value)]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
    T: Copy + Eq,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
            let new_raw = mem::transmute_copy(&new);

            loop {
                match a.compare_exchange_weak(
                    current_raw,
                    new_raw,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break Ok(current),
                    Err(previous_raw) => {
                        let previous = mem::transmute_copy(&previous_raw);

                        if !T::eq(&previous, &current) {
                            break Err(previous);
                        }

                        // The compare-exchange operation has failed and didn't store `new`. The
                        // failure is either spurious, or `previous` was semantically equal to
                        // `current` but not byte-equal. Let's retry with `previous` as the new
                        // `current`.
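                        // (One way the byte-inequality can happen: `T` contains padding bytes,
                        // so two values can compare equal via `Eq` while their underlying bytes
                        // differ.)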
                        current = previous;
                        current_raw = previous_raw;
                    }
                }
            }
        },
        {
            let guard = lock(dst as usize).write();

            if T::eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}