use std::cell::UnsafeCell;
use std::fmt;
use std::ops;

/// `AtomicUsize` providing an additional `unsync_load` function.
pub(crate) struct AtomicUsize {
    inner: UnsafeCell<std::sync::atomic::AtomicUsize>,
}
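
// safety: `UnsafeCell` suppresses the auto `Send`/`Sync` impls, but the
// wrapped `std::sync::atomic::AtomicUsize` is itself `Send + Sync`, and the
// cell's contents are only reached through `&mut self` or through shared
// references to the inner atomic.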
unsafe impl Send for AtomicUsize {}
unsafe impl Sync for AtomicUsize {}

impl AtomicUsize {
    pub(crate) const fn new(val: usize) -> AtomicUsize {
        let inner = UnsafeCell::new(std::sync::atomic::AtomicUsize::new(val));
        AtomicUsize { inner }
    }

    /// Performs an unsynchronized load.
    ///
    /// # Safety
    ///
    /// All mutations must have happened before the unsynchronized load.
    /// Additionally, there must be no concurrent mutations.
    pub(crate) unsafe fn unsync_load(&self) -> usize {
        // A relaxed atomic load is used here instead of a plain (non-atomic)
        // read: a plain read that raced with a concurrent atomic store would
        // be a data race and therefore undefined behavior.
        // See <https://github.com/tokio-rs/tokio/issues/6155>
        self.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub(crate) fn with_mut<R>(&mut self, f: impl FnOnce(&mut usize) -> R) -> R {
        // safety: `&mut self` guarantees exclusive access, so no other
        // reference to the value can exist while the closure runs.
        f(unsafe { (*self.inner.get()).get_mut() })
    }
}
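
// A minimal usage sketch, not part of the original file (the test module name
// is ours): `with_mut` mutates through exclusive access, and `unsync_load` is
// sound afterwards because the value was never shared.
#[cfg(test)]
mod unsync_load_sketch {
    use super::AtomicUsize;
    use std::sync::atomic::Ordering;

    #[test]
    fn with_mut_then_unsync_load() {
        let mut a = AtomicUsize::new(1);

        // Exclusive access: the closure receives a plain `&mut usize`.
        a.with_mut(|v| *v += 1);

        // `Deref` exposes the inner atomic's `&self` operations directly.
        assert_eq!(a.load(Ordering::Relaxed), 2);

        // safety: `a` was never shared, so there are no concurrent mutations
        // and all prior mutations happened before this load.
        assert_eq!(unsafe { a.unsync_load() }, 2);
    }
}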

impl ops::Deref for AtomicUsize {
    type Target = std::sync::atomic::AtomicUsize;

    fn deref(&self) -> &Self::Target {
        // safety: handing out a shared reference to the inner value is sound
        // because every exclusive access to it requires `&mut self`, which
        // cannot coexist with the `&self` borrowed here.
        unsafe { &*self.inner.get() }
    }
}

impl ops::DerefMut for AtomicUsize {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // safety: we hold `&mut self`, so the access is exclusive
        unsafe { &mut *self.inner.get() }
    }
}
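
// A hedged concurrent sketch, also not part of the original file: `Deref`
// makes `fetch_add` available on the wrapper, and joining every thread
// establishes the happens-before edge that `unsync_load` requires.
#[cfg(test)]
mod deref_sketch {
    use super::AtomicUsize;
    use std::sync::atomic::Ordering;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn joined_mutations_happen_before_unsync_load() {
        let a = Arc::new(AtomicUsize::new(0));

        let handles: Vec<_> = (0..4)
            .map(|_| {
                let a = Arc::clone(&a);
                thread::spawn(move || {
                    a.fetch_add(1, Ordering::Relaxed);
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }

        // safety: `join` synchronizes with each spawned thread, so every
        // mutation happened before this load and none are concurrent.
        assert_eq!(unsafe { a.unsync_load() }, 4);
    }
}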

impl fmt::Debug for AtomicUsize {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner atomic's `Debug` implementation via `Deref`.
        (**self).fmt(fmt)
    }
}