use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::Ordering;

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use crate::primitive::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
// TODO: remove in the next major version.
#[deprecated(note = "Use `CompareExchangeError` instead")]
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;

/// The error returned on a failed compare-exchange operation.
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareExchangeError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
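///
/// # Examples
///
/// A small sketch of both forms, exercised directly through this trait's methods:
///
/// ```
/// # #![allow(deprecated)]
/// use crossbeam_epoch::CompareAndSetOrdering;
/// use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};
///
/// // Form 1: a single ordering; the failure ordering is derived from it.
/// assert_eq!(SeqCst.success(), SeqCst);
/// assert_eq!(SeqCst.failure(), SeqCst);
///
/// // Form 2: an explicit (success, failure) pair.
/// assert_eq!((AcqRel, Acquire).success(), AcqRel);
/// assert_eq!((AcqRel, Acquire).failure(), Acquire);
/// ```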
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than
    /// the success ordering.
    fn failure(&self) -> Ordering;
}

#[allow(deprecated)]
impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

#[allow(deprecated)]
impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
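///
/// For illustration: on a target where `T = u64` is 8-byte aligned, this mask is `0b111`,
/// leaving three low bits free for a tag.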
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
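///
/// For illustration (assuming an 8-byte-aligned `u64`, i.e. three free tag bits):
/// `compose_tag::<u64>(0x1000, 5) == 0x1005`, and `decompose_tag::<u64>(0x1005) == (0x1000, 5)`.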
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}

/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer. This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of the pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a pointable value with the given initializer.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

/// Array with size.
///
/// # Memory layout
///
/// An array consisting of size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
/// along with pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    /// The number of elements (not the number of bytes).
    len: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout).cast::<Array<T>>();
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        (*ptr).len = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.len)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.len;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
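///
/// # Examples
///
/// A small sketch of tagging through the public API; `u64` is assumed to be 8-byte aligned
/// here, so its three low pointer bits are free for a tag:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::from(Owned::new(0u64).with_tag(2));
/// let guard = &epoch::pin();
/// assert_eq!(a.load(SeqCst, guard).tag(), 2);
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```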
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `init` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom)))]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    #[cfg(not(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom))))]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// # unsafe { drop(p.into_owned()); } // avoid leak
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
    /// ```
    pub fn compare_exchange<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: Atomic::compare_exchange
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
    /// loop {
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
    /// ```
    pub fn compare_exchange_weak<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Fetches the pointer, and then applies a function to it that returns a new value.
    /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`.
    ///
    /// Note that the given function may be called multiple times if the value has been changed by
    /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
    /// will have been applied only once to the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`Atomic::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// [`Relaxed`]: Ordering::Relaxed
    /// [`Acquire`]: Ordering::Acquire
    /// [`Release`]: Ordering::Release
    /// [`SeqCst`]: Ordering::SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
    /// assert!(res1.is_ok());
    ///
    /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |_| None);
    /// assert!(res2.is_err());
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn fetch_update<'g, F>(
        &self,
        set_order: Ordering,
        fail_order: Ordering,
        guard: &'g Guard,
        mut func: F,
    ) -> Result<Shared<'g, T>, Shared<'g, T>>
    where
        F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
    {
        let mut prev = self.load(fail_order, guard);
        while let Some(next) = func(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
                Ok(shared) => return Ok(shared),
                Err(next_prev) => prev = next_prev.current,
            }
        }
        Err(prev)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Migrating to `compare_exchange`
    ///
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Migrating to `compare_exchange_weak`
    ///
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// # unsafe { drop(curr.into_owned()); } // avoid leak
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        #[cfg(crossbeam_loom)]
        {
            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
            // which should have the same synchronization properties:
            // https://github.com/tokio-rs/loom/issues/117
            Owned::from_usize(self.data.unsync_load())
        }
        #[cfg(not(crossbeam_loom))]
        {
            Owned::from_usize(self.data.into_inner())
        }
    }

    /// Takes ownership of the pointee if it is non-null.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object, or the pointer is null.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves, but it may be null, so we have to be careful.
    ///         let old = mem::replace(&mut self.ptr, Atomic::null());
    ///         unsafe {
    ///             if let Some(x) = old.try_into_owned() {
    ///                 drop(x)
    ///             }
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
        // FIXME: See self.into_owned()
        #[cfg(crossbeam_loom)]
        let data = self.data.unsync_load();
        #[cfg(not(crossbeam_loom))]
        let data = self.data.into_inner();
        if decompose_tag::<T>(data).0 == 0 {
            None
        } else {
            Some(Owned::from_usize(data))
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
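///
/// # Examples
///
/// A round-trip sketch through the raw machine representation; per the safety contract below,
/// `data` comes from `into_usize` and is converted back exactly once:
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let o = Owned::new(1234);
/// let data = o.into_usize();
/// let o: Owned<i32> = unsafe { Pointer::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```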
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
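///
/// # Examples
///
/// A minimal sketch of `Box`-like usage through `Deref`/`DerefMut`:
///
/// ```
/// use crossbeam_epoch::Owned;
///
/// let mut o = Owned::new(vec![1, 2, 3]);
/// o.push(4);
/// assert_eq!(o.len(), 4);
/// ```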
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `init` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// # unsafe { drop(p.into_owned()); } // avoid leak
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
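///
/// # Examples
///
/// A minimal sketch: load a `Shared` from an [`Atomic`] while a guard keeps the epoch pinned:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard); // valid while `guard` is alive
/// assert!(!p.is_null());
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```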
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no more threads attempting to read/write from/to the
    ///   actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Takes ownership of the pointee if it is not null.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object, or if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     if let Some(x) = p.try_into_owned() {
    ///         drop(x);
    ///     }
    /// }
    /// ```
    pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
        if self.is_null() {
            None
        } else {
            Some(Owned::from_usize(self.data))
        }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// # unsafe { drop(p.into_owned()); } // avoid leak
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::{Owned, Shared};
    use std::mem::MaybeUninit;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
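
    // A small sketch of tag truncation: a tag wider than the alignment's free
    // low bits is masked to fit (three bits for an 8-byte-aligned `i64`).
    #[test]
    fn tag_truncation_i64() {
        assert_eq!(Shared::<i64>::null().with_tag(0b1111).tag(), 0b111);
    }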

    #[rustversion::since(1.61)]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        static _U: Atomic<u8> = Atomic::<u8>::null();
    }

    #[test]
    fn array_init() {
        let owned = Owned::<[MaybeUninit<usize>]>::init(10);
        let arr: &[MaybeUninit<usize>] = &owned;
        assert_eq!(arr.len(), 10);
    }
}