1use core::borrow::{Borrow, BorrowMut};
2use core::cmp;
3use core::fmt;
4use core::marker::PhantomData;
5use core::mem::{self, MaybeUninit};
6use core::ops::{Deref, DerefMut};
7use core::slice;
8
9use crate::alloc::alloc;
10use crate::alloc::boxed::Box;
11use crate::guard::Guard;
12use crate::primitive::sync::atomic::{AtomicUsize, Ordering};
13use crossbeam_utils::atomic::AtomicConsume;
14
/// Given an ordering for the success case in a compare-exchange operation, returns the strongest
16/// appropriate ordering for the failure case.
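///
/// For example, a `Release` success ordering maps to a `Relaxed` failure ordering, and `AcqRel`
/// maps to `Acquire`.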
17#[inline]
18fn strongest_failure_ordering(ord: Ordering) -> Ordering {
19 use self::Ordering::*;
20 match ord {
21 Relaxed | Release => Relaxed,
22 Acquire | AcqRel => Acquire,
23 _ => SeqCst,
24 }
25}
26
/// The error returned on a failed compare-and-set operation.
28// TODO: remove in the next major version.
29#[deprecated(note = "Use `CompareExchangeError` instead")]
30pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;
31
/// The error returned on a failed compare-exchange operation.
33pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
34 /// The value in the atomic pointer at the time of the failed operation.
35 pub current: Shared<'g, T>,
36
37 /// The new value, which the operation failed to store.
38 pub new: P,
39}
40
41impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
42 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
43 f.debug_struct("CompareExchangeError")
44 .field("current", &self.current)
45 .field("new", &self.new)
46 .finish()
47 }
48}
49
50/// Memory orderings for compare-and-set operations.
51///
52/// A compare-and-set operation can have different memory orderings depending on whether it
53/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
54///
55/// The two ways of specifying orderings for compare-and-set are:
56///
57/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
58/// ordering is chosen.
59/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
60/// for the failure case.
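///
/// # Examples
///
/// A brief illustration of both forms (this trait is deprecated together with
/// `compare_and_set`):
///
/// ```
/// # #![allow(deprecated)]
/// use crossbeam_epoch::CompareAndSetOrdering;
/// use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
///
/// // A single ordering: the failure ordering is derived automatically.
/// assert_eq!(AcqRel.success(), AcqRel);
/// assert_eq!(AcqRel.failure(), Acquire);
///
/// // A pair of orderings: success and failure are given explicitly.
/// assert_eq!((AcqRel, Relaxed).success(), AcqRel);
/// assert_eq!((AcqRel, Relaxed).failure(), Relaxed);
/// ```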
61// TODO: remove in the next major version.
62#[deprecated(
63 note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
65)]
66pub trait CompareAndSetOrdering {
67 /// The ordering of the operation when it succeeds.
68 fn success(&self) -> Ordering;
69
70 /// The ordering of the operation when it fails.
71 ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
    /// than the success ordering.
74 fn failure(&self) -> Ordering;
75}
76
77#[allow(deprecated)]
78impl CompareAndSetOrdering for Ordering {
79 #[inline]
80 fn success(&self) -> Ordering {
81 *self
82 }
83
84 #[inline]
85 fn failure(&self) -> Ordering {
86 strongest_failure_ordering(*self)
87 }
88}
89
90#[allow(deprecated)]
91impl CompareAndSetOrdering for (Ordering, Ordering) {
92 #[inline]
93 fn success(&self) -> Ordering {
94 self.0
95 }
96
97 #[inline]
98 fn failure(&self) -> Ordering {
99 self.1
100 }
101}
102
103/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
104#[inline]
105fn low_bits<T: ?Sized + Pointable>() -> usize {
106 (1 << T::ALIGN.trailing_zeros()) - 1
107}
108
/// Panics if the pointer is not properly aligned.
110#[inline]
111fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
112 assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
113}
114
115/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
116///
117/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
118#[inline]
119fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
120 (data & !low_bits::<T>()) | (tag & low_bits::<T>())
121}
122
123/// Decomposes a tagged pointer `data` into the pointer and the tag.
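///
/// For example, on a target where `u64` is 8-byte aligned, `compose_tag::<u64>(0x1000, 0b101)`
/// yields `0x1005`, and `decompose_tag::<u64>(0x1005)` yields `(0x1000, 0b101)`.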
124#[inline]
125fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
126 (data & !low_bits::<T>(), data & low_bits::<T>())
127}
128
129/// Types that are pointed to by a single word.
130///
131/// In concurrent programming, it is necessary to represent an object within a word because atomic
132/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait
133/// qualifies such types that are pointed to by a single word.
134///
135/// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is
136/// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for
137/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
138/// size and elements.
139///
140/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In
141/// particular, Crossbeam supports dynamically sized slices as follows.
142///
143/// ```
144/// use std::mem::MaybeUninit;
145/// use crossbeam_epoch::Owned;
146///
147/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
148/// ```
149pub trait Pointable {
    /// The alignment of the pointed-to object.
151 const ALIGN: usize;
152
153 /// The type for initializers.
154 type Init;
155
    /// Initializes a pointable object with the given initializer.
157 ///
158 /// # Safety
159 ///
160 /// The result should be a multiple of `ALIGN`.
161 unsafe fn init(init: Self::Init) -> usize;
162
163 /// Dereferences the given pointer.
164 ///
165 /// # Safety
166 ///
167 /// - The given `ptr` should have been initialized with [`Pointable::init`].
168 /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
169 /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
170 unsafe fn deref<'a>(ptr: usize) -> &'a Self;
171
172 /// Mutably dereferences the given pointer.
173 ///
174 /// # Safety
175 ///
176 /// - The given `ptr` should have been initialized with [`Pointable::init`].
177 /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
178 /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
179 /// concurrently.
180 unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;
181
182 /// Drops the object pointed to by the given pointer.
183 ///
184 /// # Safety
185 ///
186 /// - The given `ptr` should have been initialized with [`Pointable::init`].
187 /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
188 /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
189 /// concurrently.
190 unsafe fn drop(ptr: usize);
191}
192
193impl<T> Pointable for T {
194 const ALIGN: usize = mem::align_of::<T>();
195
196 type Init = T;
197
198 unsafe fn init(init: Self::Init) -> usize {
199 Box::into_raw(Box::new(init)) as usize
200 }
201
202 unsafe fn deref<'a>(ptr: usize) -> &'a Self {
203 &*(ptr as *const T)
204 }
205
206 unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
207 &mut *(ptr as *mut T)
208 }
209
210 unsafe fn drop(ptr: usize) {
211 drop(Box::from_raw(ptr as *mut T));
212 }
213}
214
215/// Array with size.
216///
217/// # Memory layout
218///
219/// An array consisting of size and elements:
220///
221/// ```text
222/// elements
223/// |
224/// |
225/// ------------------------------------
226/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
227/// ------------------------------------
228/// ```
229///
230/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not
231/// along with pointer as in `Box<[T]>`).
232///
233/// Elements are not present in the type, but they will be in the allocation.
///
236// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
237// [`alloc::alloc::Layout::extend`] instead.
238#[repr(C)]
239struct Array<T> {
240 /// The number of elements (not the number of bytes).
241 len: usize,
242 elements: [MaybeUninit<T>; 0],
243}
244
245impl<T> Pointable for [MaybeUninit<T>] {
246 const ALIGN: usize = mem::align_of::<Array<T>>();
247
248 type Init = usize;
249
250 unsafe fn init(len: Self::Init) -> usize {
251 let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
252 let align = mem::align_of::<Array<T>>();
253 let layout = alloc::Layout::from_size_align(size, align).unwrap();
254 let ptr = alloc::alloc(layout).cast::<Array<T>>();
255 if ptr.is_null() {
256 alloc::handle_alloc_error(layout);
257 }
258 (*ptr).len = len;
259 ptr as usize
260 }
261
262 unsafe fn deref<'a>(ptr: usize) -> &'a Self {
263 let array = &*(ptr as *const Array<T>);
264 slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
265 }
266
267 unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.len)
270 }
271
272 unsafe fn drop(ptr: usize) {
273 let array = &*(ptr as *mut Array<T>);
274 let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.len;
275 let align = mem::align_of::<Array<T>>();
276 let layout = alloc::Layout::from_size_align(size, align).unwrap();
277 alloc::dealloc(ptr as *mut u8, layout);
278 }
279}
280
281/// An atomic pointer that can be safely shared between threads.
282///
283/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
284/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
285/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
286///
287/// Any method that loads the pointer must be passed a reference to a [`Guard`].
288///
289/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
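///
/// # Examples
///
/// A small illustration of tagging through the public API (on typical targets `u64` is at least
/// 4-byte aligned, so the two lowest address bits are free for a tag):
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234u64);
/// let guard = &epoch::pin();
/// let tagged = a.load(SeqCst, guard).with_tag(3);
/// a.store(tagged, SeqCst);
/// assert_eq!(a.load(SeqCst, guard).tag(), 3);
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```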
290pub struct Atomic<T: ?Sized + Pointable> {
291 data: AtomicUsize,
292 _marker: PhantomData<*mut T>,
293}
294
295unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
296unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}
297
298impl<T> Atomic<T> {
299 /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
300 ///
301 /// # Examples
302 ///
303 /// ```
304 /// use crossbeam_epoch::Atomic;
305 ///
306 /// let a = Atomic::new(1234);
307 /// # unsafe { drop(a.into_owned()); } // avoid leak
308 /// ```
309 pub fn new(init: T) -> Atomic<T> {
310 Self::init(init)
311 }
312}
313
314impl<T: ?Sized + Pointable> Atomic<T> {
315 /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
316 ///
317 /// # Examples
318 ///
319 /// ```
320 /// use crossbeam_epoch::Atomic;
321 ///
322 /// let a = Atomic::<i32>::init(1234);
323 /// # unsafe { drop(a.into_owned()); } // avoid leak
324 /// ```
325 pub fn init(init: T::Init) -> Atomic<T> {
326 Self::from(Owned::init(init))
327 }
328
329 /// Returns a new atomic pointer pointing to the tagged pointer `data`.
330 fn from_usize(data: usize) -> Self {
331 Self {
332 data: AtomicUsize::new(data),
333 _marker: PhantomData,
334 }
335 }
336
337 /// Returns a new null atomic pointer.
338 ///
339 /// # Examples
340 ///
341 /// ```
342 /// use crossbeam_epoch::Atomic;
343 ///
344 /// let a = Atomic::<i32>::null();
345 /// ```
346 #[cfg(not(crossbeam_loom))]
347 pub const fn null() -> Atomic<T> {
348 Self {
349 data: AtomicUsize::new(0),
350 _marker: PhantomData,
351 }
352 }

    /// Returns a new null atomic pointer.
354 #[cfg(crossbeam_loom)]
355 pub fn null() -> Atomic<T> {
356 Self {
357 data: AtomicUsize::new(0),
358 _marker: PhantomData,
359 }
360 }
361
362 /// Loads a `Shared` from the atomic pointer.
363 ///
364 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
365 /// operation.
366 ///
367 /// # Examples
368 ///
369 /// ```
370 /// use crossbeam_epoch::{self as epoch, Atomic};
371 /// use std::sync::atomic::Ordering::SeqCst;
372 ///
373 /// let a = Atomic::new(1234);
374 /// let guard = &epoch::pin();
375 /// let p = a.load(SeqCst, guard);
376 /// # unsafe { drop(a.into_owned()); } // avoid leak
377 /// ```
378 pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
379 unsafe { Shared::from_usize(self.data.load(ord)) }
380 }
381
382 /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
383 ///
384 /// This is similar to the "acquire" ordering, except that an ordering is
385 /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
387 /// architectures with a weak memory model since they don't require memory
388 /// fence instructions.
389 ///
390 /// The exact definition of "depend on" is a bit vague, but it works as you
391 /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
393 ///
394 /// # Examples
395 ///
396 /// ```
397 /// use crossbeam_epoch::{self as epoch, Atomic};
398 ///
399 /// let a = Atomic::new(1234);
400 /// let guard = &epoch::pin();
401 /// let p = a.load_consume(guard);
402 /// # unsafe { drop(a.into_owned()); } // avoid leak
403 /// ```
404 pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
405 unsafe { Shared::from_usize(self.data.load_consume()) }
406 }
407
408 /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
409 ///
410 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
411 /// operation.
412 ///
413 /// # Examples
414 ///
415 /// ```
416 /// use crossbeam_epoch::{Atomic, Owned, Shared};
417 /// use std::sync::atomic::Ordering::SeqCst;
418 ///
419 /// let a = Atomic::new(1234);
420 /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak
421 /// a.store(Shared::null(), SeqCst);
422 /// a.store(Owned::new(1234), SeqCst);
423 /// # unsafe { drop(a.into_owned()); } // avoid leak
424 /// ```
425 pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
426 self.data.store(new.into_usize(), ord);
427 }
428
429 /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
430 /// `Shared`.
431 ///
432 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
433 /// operation.
434 ///
435 /// # Examples
436 ///
437 /// ```
438 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
439 /// use std::sync::atomic::Ordering::SeqCst;
440 ///
441 /// let a = Atomic::new(1234);
442 /// let guard = &epoch::pin();
443 /// let p = a.swap(Shared::null(), SeqCst, guard);
444 /// # unsafe { drop(p.into_owned()); } // avoid leak
445 /// ```
446 pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
447 unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
448 }
449
450 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
451 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
452 /// same object, but with different tags, will not be considered equal.
453 ///
454 /// The return value is a result indicating whether the new pointer was written. On success the
455 /// pointer that was written is returned. On failure the actual current value and `new` are
456 /// returned.
457 ///
458 /// This method takes two `Ordering` arguments to describe the memory
459 /// ordering of this operation. `success` describes the required ordering for the
460 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
461 /// `failure` describes the required ordering for the load operation that takes place when
462 /// the comparison fails. Using `Acquire` as success ordering makes the store part
463 /// of this operation `Relaxed`, and using `Release` makes the successful load
464 /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
465 /// and must be equivalent to or weaker than the success ordering.
466 ///
467 /// # Examples
468 ///
469 /// ```
470 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
471 /// use std::sync::atomic::Ordering::SeqCst;
472 ///
473 /// let a = Atomic::new(1234);
474 ///
475 /// let guard = &epoch::pin();
476 /// let curr = a.load(SeqCst, guard);
477 /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
478 /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
479 /// # unsafe { drop(curr.into_owned()); } // avoid leak
480 /// ```
481 pub fn compare_exchange<'g, P>(
482 &self,
483 current: Shared<'_, T>,
484 new: P,
485 success: Ordering,
486 failure: Ordering,
487 _: &'g Guard,
488 ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
489 where
490 P: Pointer<T>,
491 {
492 let new = new.into_usize();
493 self.data
494 .compare_exchange(current.into_usize(), new, success, failure)
495 .map(|_| unsafe { Shared::from_usize(new) })
496 .map_err(|current| unsafe {
497 CompareExchangeError {
498 current: Shared::from_usize(current),
499 new: P::from_usize(new),
500 }
501 })
502 }
503
504 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
505 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
506 /// same object, but with different tags, will not be considered equal.
507 ///
508 /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
509 /// succeeds, which can result in more efficient code on some platforms. The return value is a
510 /// result indicating whether the new pointer was written. On success the pointer that was
511 /// written is returned. On failure the actual current value and `new` are returned.
512 ///
513 /// This method takes two `Ordering` arguments to describe the memory
514 /// ordering of this operation. `success` describes the required ordering for the
515 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
516 /// `failure` describes the required ordering for the load operation that takes place when
517 /// the comparison fails. Using `Acquire` as success ordering makes the store part
518 /// of this operation `Relaxed`, and using `Release` makes the successful load
519 /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
520 /// and must be equivalent to or weaker than the success ordering.
521 ///
522 /// [`compare_exchange`]: Atomic::compare_exchange
523 ///
524 /// # Examples
525 ///
526 /// ```
527 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
528 /// use std::sync::atomic::Ordering::SeqCst;
529 ///
530 /// let a = Atomic::new(1234);
531 /// let guard = &epoch::pin();
532 ///
533 /// let mut new = Owned::new(5678);
534 /// let mut ptr = a.load(SeqCst, guard);
535 /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
536 /// loop {
537 /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
538 /// Ok(p) => {
539 /// ptr = p;
540 /// break;
541 /// }
542 /// Err(err) => {
543 /// ptr = err.current;
544 /// new = err.new;
545 /// }
546 /// }
547 /// }
548 ///
549 /// let mut curr = a.load(SeqCst, guard);
550 /// loop {
551 /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
552 /// Ok(_) => break,
553 /// Err(err) => curr = err.current,
554 /// }
555 /// }
556 /// # unsafe { drop(curr.into_owned()); } // avoid leak
557 /// ```
558 pub fn compare_exchange_weak<'g, P>(
559 &self,
560 current: Shared<'_, T>,
561 new: P,
562 success: Ordering,
563 failure: Ordering,
564 _: &'g Guard,
565 ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
566 where
567 P: Pointer<T>,
568 {
569 let new = new.into_usize();
570 self.data
571 .compare_exchange_weak(current.into_usize(), new, success, failure)
572 .map(|_| unsafe { Shared::from_usize(new) })
573 .map_err(|current| unsafe {
574 CompareExchangeError {
575 current: Shared::from_usize(current),
576 new: P::from_usize(new),
577 }
578 })
579 }
580
    /// Fetches the pointer, and then applies a function to it that returns a new value.
    /// Returns a `Result` of `Ok(new_pointer)` (the pointer that was written) if the function
    /// returned `Some`, or `Err(current_pointer)` with the last loaded pointer if it returned `None`.
583 ///
584 /// Note that the given function may be called multiple times if the value has been changed by
585 /// other threads in the meantime, as long as the function returns `Some(_)`, but the function
586 /// will have been applied only once to the stored value.
587 ///
588 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
589 /// ordering of this operation. The first describes the required ordering for
590 /// when the operation finally succeeds while the second describes the
591 /// required ordering for loads. These correspond to the success and failure
592 /// orderings of [`Atomic::compare_exchange`] respectively.
593 ///
594 /// Using [`Acquire`] as success ordering makes the store part of this
595 /// operation [`Relaxed`], and using [`Release`] makes the final successful
596 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
597 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
598 /// success ordering.
599 ///
600 /// [`Relaxed`]: Ordering::Relaxed
601 /// [`Acquire`]: Ordering::Acquire
602 /// [`Release`]: Ordering::Release
603 /// [`SeqCst`]: Ordering::SeqCst
604 ///
605 /// # Examples
606 ///
607 /// ```
608 /// use crossbeam_epoch::{self as epoch, Atomic};
609 /// use std::sync::atomic::Ordering::SeqCst;
610 ///
611 /// let a = Atomic::new(1234);
612 /// let guard = &epoch::pin();
613 ///
614 /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1)));
615 /// assert!(res1.is_ok());
616 ///
    /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |_| None);
618 /// assert!(res2.is_err());
619 /// # unsafe { drop(a.into_owned()); } // avoid leak
620 /// ```
621 pub fn fetch_update<'g, F>(
622 &self,
623 set_order: Ordering,
624 fail_order: Ordering,
625 guard: &'g Guard,
626 mut func: F,
627 ) -> Result<Shared<'g, T>, Shared<'g, T>>
628 where
629 F: FnMut(Shared<'g, T>) -> Option<Shared<'g, T>>,
630 {
631 let mut prev = self.load(fail_order, guard);
632 while let Some(next) = func(prev) {
633 match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) {
634 Ok(shared) => return Ok(shared),
635 Err(next_prev) => prev = next_prev.current,
636 }
637 }
638 Err(prev)
639 }
640
641 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
642 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
643 /// same object, but with different tags, will not be considered equal.
644 ///
645 /// The return value is a result indicating whether the new pointer was written. On success the
646 /// pointer that was written is returned. On failure the actual current value and `new` are
647 /// returned.
648 ///
649 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
650 /// ordering of this operation.
651 ///
652 /// # Migrating to `compare_exchange`
653 ///
654 /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
655 /// memory orderings:
656 ///
657 /// Original | Success | Failure
658 /// -------- | ------- | -------
659 /// Relaxed | Relaxed | Relaxed
660 /// Acquire | Acquire | Acquire
661 /// Release | Release | Relaxed
662 /// AcqRel | AcqRel | Acquire
663 /// SeqCst | SeqCst | SeqCst
664 ///
665 /// # Examples
666 ///
667 /// ```
668 /// # #![allow(deprecated)]
669 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
670 /// use std::sync::atomic::Ordering::SeqCst;
671 ///
672 /// let a = Atomic::new(1234);
673 ///
674 /// let guard = &epoch::pin();
675 /// let curr = a.load(SeqCst, guard);
676 /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
677 /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
678 /// # unsafe { drop(curr.into_owned()); } // avoid leak
679 /// ```
680 // TODO: remove in the next major version.
681 #[allow(deprecated)]
682 #[deprecated(note = "Use `compare_exchange` instead")]
683 pub fn compare_and_set<'g, O, P>(
684 &self,
685 current: Shared<'_, T>,
686 new: P,
687 ord: O,
688 guard: &'g Guard,
689 ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
690 where
691 O: CompareAndSetOrdering,
692 P: Pointer<T>,
693 {
694 self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
695 }
696
697 /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
698 /// value is the same as `current`. The tag is also taken into account, so two pointers to the
699 /// same object, but with different tags, will not be considered equal.
700 ///
701 /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
702 /// succeeds, which can result in more efficient code on some platforms. The return value is a
703 /// result indicating whether the new pointer was written. On success the pointer that was
704 /// written is returned. On failure the actual current value and `new` are returned.
705 ///
706 /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
707 /// ordering of this operation.
708 ///
709 /// [`compare_and_set`]: Atomic::compare_and_set
710 ///
711 /// # Migrating to `compare_exchange_weak`
712 ///
713 /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
714 /// memory orderings:
715 ///
716 /// Original | Success | Failure
717 /// -------- | ------- | -------
718 /// Relaxed | Relaxed | Relaxed
719 /// Acquire | Acquire | Acquire
720 /// Release | Release | Relaxed
721 /// AcqRel | AcqRel | Acquire
722 /// SeqCst | SeqCst | SeqCst
723 ///
724 /// # Examples
725 ///
726 /// ```
727 /// # #![allow(deprecated)]
728 /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
729 /// use std::sync::atomic::Ordering::SeqCst;
730 ///
731 /// let a = Atomic::new(1234);
732 /// let guard = &epoch::pin();
733 ///
734 /// let mut new = Owned::new(5678);
735 /// let mut ptr = a.load(SeqCst, guard);
736 /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
737 /// loop {
738 /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
739 /// Ok(p) => {
740 /// ptr = p;
741 /// break;
742 /// }
743 /// Err(err) => {
744 /// ptr = err.current;
745 /// new = err.new;
746 /// }
747 /// }
748 /// }
749 ///
750 /// let mut curr = a.load(SeqCst, guard);
751 /// loop {
752 /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
753 /// Ok(_) => break,
754 /// Err(err) => curr = err.current,
755 /// }
756 /// }
757 /// # unsafe { drop(curr.into_owned()); } // avoid leak
758 /// ```
759 // TODO: remove in the next major version.
760 #[allow(deprecated)]
761 #[deprecated(note = "Use `compare_exchange_weak` instead")]
762 pub fn compare_and_set_weak<'g, O, P>(
763 &self,
764 current: Shared<'_, T>,
765 new: P,
766 ord: O,
767 guard: &'g Guard,
768 ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
769 where
770 O: CompareAndSetOrdering,
771 P: Pointer<T>,
772 {
773 self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
774 }
775
776 /// Bitwise "and" with the current tag.
777 ///
778 /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
779 /// new tag to the result. Returns the previous pointer.
780 ///
781 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
782 /// operation.
783 ///
784 /// # Examples
785 ///
786 /// ```
787 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
788 /// use std::sync::atomic::Ordering::SeqCst;
789 ///
790 /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
791 /// let guard = &epoch::pin();
792 /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
793 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
794 /// ```
795 pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
796 unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
797 }
798
799 /// Bitwise "or" with the current tag.
800 ///
801 /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
802 /// new tag to the result. Returns the previous pointer.
803 ///
804 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
805 /// operation.
806 ///
807 /// # Examples
808 ///
809 /// ```
810 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
811 /// use std::sync::atomic::Ordering::SeqCst;
812 ///
813 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
814 /// let guard = &epoch::pin();
815 /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
816 /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
817 /// ```
818 pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
819 unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
820 }
821
822 /// Bitwise "xor" with the current tag.
823 ///
824 /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
825 /// new tag to the result. Returns the previous pointer.
826 ///
827 /// This method takes an [`Ordering`] argument which describes the memory ordering of this
828 /// operation.
829 ///
830 /// # Examples
831 ///
832 /// ```
833 /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
834 /// use std::sync::atomic::Ordering::SeqCst;
835 ///
836 /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
837 /// let guard = &epoch::pin();
838 /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
839 /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
840 /// ```
841 pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
842 unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
843 }
844
845 /// Takes ownership of the pointee.
846 ///
847 /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
848 /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
849 /// destructors of data structures.
850 ///
851 /// # Panics
852 ///
853 /// Panics if this pointer is null, but only in debug mode.
854 ///
855 /// # Safety
856 ///
857 /// This method may be called only if the pointer is valid and nobody else is holding a
858 /// reference to the same object.
859 ///
860 /// # Examples
861 ///
862 /// ```rust
863 /// # use std::mem;
864 /// # use crossbeam_epoch::Atomic;
865 /// struct DataStructure {
866 /// ptr: Atomic<usize>,
867 /// }
868 ///
869 /// impl Drop for DataStructure {
870 /// fn drop(&mut self) {
871 /// // By now the DataStructure lives only in our thread and we are sure we don't hold
872 /// // any Shared or & to it ourselves.
873 /// unsafe {
874 /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
875 /// }
876 /// }
877 /// }
878 /// ```
879 pub unsafe fn into_owned(self) -> Owned<T> {
880 Owned::from_usize(self.data.into_inner())
881 }
882
883 /// Takes ownership of the pointee if it is non-null.
884 ///
885 /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
886 /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
887 /// destructors of data structures.
888 ///
889 /// # Safety
890 ///
891 /// This method may be called only if the pointer is valid and nobody else is holding a
892 /// reference to the same object, or the pointer is null.
893 ///
894 /// # Examples
895 ///
896 /// ```rust
897 /// # use std::mem;
898 /// # use crossbeam_epoch::Atomic;
899 /// struct DataStructure {
900 /// ptr: Atomic<usize>,
901 /// }
902 ///
903 /// impl Drop for DataStructure {
904 /// fn drop(&mut self) {
905 /// // By now the DataStructure lives only in our thread and we are sure we don't hold
906 /// // any Shared or & to it ourselves, but it may be null, so we have to be careful.
907 /// let old = mem::replace(&mut self.ptr, Atomic::null());
908 /// unsafe {
909 /// if let Some(x) = old.try_into_owned() {
910 /// drop(x)
911 /// }
912 /// }
913 /// }
914 /// }
915 /// ```
916 pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
917 let data = self.data.into_inner();
918 if decompose_tag::<T>(data).0 == 0 {
919 None
920 } else {
921 Some(Owned::from_usize(data))
922 }
923 }
924}
925
926impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
927 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
928 let data = self.data.load(Ordering::SeqCst);
929 let (raw, tag) = decompose_tag::<T>(data);
930
931 f.debug_struct("Atomic")
932 .field("raw", &raw)
933 .field("tag", &tag)
934 .finish()
935 }
936}
937
938impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
939 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
940 let data = self.data.load(Ordering::SeqCst);
941 let (raw, _) = decompose_tag::<T>(data);
942 fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
943 }
944}
945
946impl<T: ?Sized + Pointable> Clone for Atomic<T> {
947 /// Returns a copy of the atomic value.
948 ///
949 /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
950 /// atomics or fences.
951 fn clone(&self) -> Self {
952 let data = self.data.load(Ordering::Relaxed);
953 Atomic::from_usize(data)
954 }
955}
956
957impl<T: ?Sized + Pointable> Default for Atomic<T> {
958 fn default() -> Self {
959 Atomic::null()
960 }
961}
962
963impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
964 /// Returns a new atomic pointer pointing to `owned`.
965 ///
966 /// # Examples
967 ///
968 /// ```
969 /// use crossbeam_epoch::{Atomic, Owned};
970 ///
971 /// let a = Atomic::<i32>::from(Owned::new(1234));
972 /// # unsafe { drop(a.into_owned()); } // avoid leak
973 /// ```
974 fn from(owned: Owned<T>) -> Self {
975 let data = owned.data;
976 mem::forget(owned);
977 Self::from_usize(data)
978 }
979}
980
981impl<T> From<Box<T>> for Atomic<T> {
982 fn from(b: Box<T>) -> Self {
983 Self::from(Owned::from(b))
984 }
985}
986
987impl<T> From<T> for Atomic<T> {
988 fn from(t: T) -> Self {
989 Self::new(t)
990 }
991}
992
993impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
994 /// Returns a new atomic pointer pointing to `ptr`.
995 ///
996 /// # Examples
997 ///
998 /// ```
999 /// use crossbeam_epoch::{Atomic, Shared};
1000 ///
1001 /// let a = Atomic::<i32>::from(Shared::<i32>::null());
1002 /// ```
1003 fn from(ptr: Shared<'g, T>) -> Self {
1004 Self::from_usize(ptr.data)
1005 }
1006}
1007
1008impl<T> From<*const T> for Atomic<T> {
1009 /// Returns a new atomic pointer pointing to `raw`.
1010 ///
1011 /// # Examples
1012 ///
1013 /// ```
1014 /// use std::ptr;
1015 /// use crossbeam_epoch::Atomic;
1016 ///
1017 /// let a = Atomic::<i32>::from(ptr::null::<i32>());
1018 /// ```
1019 fn from(raw: *const T) -> Self {
1020 Self::from_usize(raw as usize)
1021 }
1022}
1023
1024/// A trait for either `Owned` or `Shared` pointers.
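///
/// # Examples
///
/// An illustrative round-trip through the raw representation; as required by the safety contract
/// below, each `into_usize` is matched by exactly one `from_usize`:
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let o = Owned::new(1234);
/// let data = o.into_usize();
/// // `data` came from `into_usize` and is converted back exactly once.
/// let o = unsafe { Owned::<i32>::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```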
1025pub trait Pointer<T: ?Sized + Pointable> {
1026 /// Returns the machine representation of the pointer.
1027 fn into_usize(self) -> usize;
1028
1029 /// Returns a new pointer pointing to the tagged pointer `data`.
1030 ///
1031 /// # Safety
1032 ///
1033 /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
1034 /// not be converted back by `Pointer::from_usize()` multiple times.
1035 unsafe fn from_usize(data: usize) -> Self;
1036}
1037
1038/// An owned heap-allocated object.
1039///
1040/// This type is very similar to `Box<T>`.
1041///
1042/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1043/// least significant bits of the address.
1044pub struct Owned<T: ?Sized + Pointable> {
1045 data: usize,
1046 _marker: PhantomData<Box<T>>,
1047}
1048
1049impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
1050 #[inline]
1051 fn into_usize(self) -> usize {
1052 let data = self.data;
1053 mem::forget(self);
1054 data
1055 }
1056
1057 /// Returns a new pointer pointing to the tagged pointer `data`.
1058 ///
1059 /// # Panics
1060 ///
1061 /// Panics if the data is zero in debug mode.
1062 #[inline]
1063 unsafe fn from_usize(data: usize) -> Self {
1064 debug_assert!(data != 0, "converting zero into `Owned`");
1065 Owned {
1066 data,
1067 _marker: PhantomData,
1068 }
1069 }
1070}
1071
1072impl<T> Owned<T> {
1073 /// Returns a new owned pointer pointing to `raw`.
1074 ///
1075 /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
1076 /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
1077 /// the same raw pointer.
1078 ///
1079 /// # Panics
1080 ///
1081 /// Panics if `raw` is not properly aligned.
1082 ///
1083 /// # Safety
1084 ///
1085 /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
1086 /// back by `Owned::from_raw()` multiple times.
1087 ///
1088 /// # Examples
1089 ///
1090 /// ```
1091 /// use crossbeam_epoch::Owned;
1092 ///
1093 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1094 /// ```
1095 pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
1096 let raw = raw as usize;
1097 ensure_aligned::<T>(raw);
1098 Self::from_usize(raw)
1099 }
1100
1101 /// Converts the owned pointer into a `Box`.
1102 ///
1103 /// # Examples
1104 ///
1105 /// ```
1106 /// use crossbeam_epoch::Owned;
1107 ///
1108 /// let o = Owned::new(1234);
1109 /// let b: Box<i32> = o.into_box();
1110 /// assert_eq!(*b, 1234);
1111 /// ```
1112 pub fn into_box(self) -> Box<T> {
1113 let (raw, _) = decompose_tag::<T>(self.data);
1114 mem::forget(self);
1115 unsafe { Box::from_raw(raw as *mut _) }
1116 }
1117
1118 /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1119 ///
1120 /// # Examples
1121 ///
1122 /// ```
1123 /// use crossbeam_epoch::Owned;
1124 ///
1125 /// let o = Owned::new(1234);
1126 /// ```
1127 pub fn new(init: T) -> Owned<T> {
1128 Self::init(init)
1129 }
1130}
1131
1132impl<T: ?Sized + Pointable> Owned<T> {
1133 /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
1134 ///
1135 /// # Examples
1136 ///
1137 /// ```
1138 /// use crossbeam_epoch::Owned;
1139 ///
1140 /// let o = Owned::<i32>::init(1234);
1141 /// ```
1142 pub fn init(init: T::Init) -> Owned<T> {
1143 unsafe { Self::from_usize(T::init(init)) }
1144 }
1145
1146 /// Converts the owned pointer into a [`Shared`].
1147 ///
1148 /// # Examples
1149 ///
1150 /// ```
1151 /// use crossbeam_epoch::{self as epoch, Owned};
1152 ///
1153 /// let o = Owned::new(1234);
1154 /// let guard = &epoch::pin();
1155 /// let p = o.into_shared(guard);
1156 /// # unsafe { drop(p.into_owned()); } // avoid leak
1157 /// ```
1158 #[allow(clippy::needless_lifetimes)]
1159 pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
1160 unsafe { Shared::from_usize(self.into_usize()) }
1161 }
1162
1163 /// Returns the tag stored within the pointer.
1164 ///
1165 /// # Examples
1166 ///
1167 /// ```
1168 /// use crossbeam_epoch::Owned;
1169 ///
1170 /// assert_eq!(Owned::new(1234).tag(), 0);
1171 /// ```
1172 pub fn tag(&self) -> usize {
1173 let (_, tag) = decompose_tag::<T>(self.data);
1174 tag
1175 }
1176
    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
1179 ///
1180 /// # Examples
1181 ///
1182 /// ```
1183 /// use crossbeam_epoch::Owned;
1184 ///
1185 /// let o = Owned::new(0u64);
1186 /// assert_eq!(o.tag(), 0);
1187 /// let o = o.with_tag(2);
1188 /// assert_eq!(o.tag(), 2);
1189 /// ```
1190 pub fn with_tag(self, tag: usize) -> Owned<T> {
1191 let data = self.into_usize();
1192 unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
1193 }
1194}
1195
1196impl<T: ?Sized + Pointable> Drop for Owned<T> {
1197 fn drop(&mut self) {
1198 let (raw, _) = decompose_tag::<T>(self.data);
1199 unsafe {
1200 T::drop(raw);
1201 }
1202 }
1203}
1204
1205impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
1206 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1207 let (raw, tag) = decompose_tag::<T>(self.data);
1208
1209 f.debug_struct("Owned")
1210 .field("raw", &raw)
1211 .field("tag", &tag)
1212 .finish()
1213 }
1214}
1215
1216impl<T: Clone> Clone for Owned<T> {
1217 fn clone(&self) -> Self {
1218 Owned::new((**self).clone()).with_tag(self.tag())
1219 }
1220}
1221
1222impl<T: ?Sized + Pointable> Deref for Owned<T> {
1223 type Target = T;
1224
1225 fn deref(&self) -> &T {
1226 let (raw, _) = decompose_tag::<T>(self.data);
1227 unsafe { T::deref(raw) }
1228 }
1229}
1230
1231impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
1232 fn deref_mut(&mut self) -> &mut T {
1233 let (raw, _) = decompose_tag::<T>(self.data);
1234 unsafe { T::deref_mut(raw) }
1235 }
1236}
1237
1238impl<T> From<T> for Owned<T> {
1239 fn from(t: T) -> Self {
1240 Owned::new(t)
1241 }
1242}
1243
1244impl<T> From<Box<T>> for Owned<T> {
1245 /// Returns a new owned pointer pointing to `b`.
1246 ///
1247 /// # Panics
1248 ///
1249 /// Panics if the pointer (the `Box`) is not properly aligned.
1250 ///
1251 /// # Examples
1252 ///
1253 /// ```
1254 /// use crossbeam_epoch::Owned;
1255 ///
1256 /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
1257 /// ```
1258 fn from(b: Box<T>) -> Self {
1259 unsafe { Self::from_raw(Box::into_raw(b)) }
1260 }
1261}
1262
1263impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
1264 fn borrow(&self) -> &T {
1265 self.deref()
1266 }
1267}
1268
1269impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
1270 fn borrow_mut(&mut self) -> &mut T {
1271 self.deref_mut()
1272 }
1273}
1274
1275impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
1276 fn as_ref(&self) -> &T {
1277 self.deref()
1278 }
1279}
1280
1281impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
1282 fn as_mut(&mut self) -> &mut T {
1283 self.deref_mut()
1284 }
1285}
1286
1287/// A pointer to an object protected by the epoch GC.
1288///
1289/// The pointer is valid for use only during the lifetime `'g`.
1290///
1291/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
1292/// least significant bits of the address.
1293pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
1294 data: usize,
1295 _marker: PhantomData<(&'g (), *const T)>,
1296}
1297
1298impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
1299 fn clone(&self) -> Self {
1300 *self
1301 }
1302}
1303
1304impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}
1305
1306impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
1307 #[inline]
1308 fn into_usize(self) -> usize {
1309 self.data
1310 }
1311
1312 #[inline]
1313 unsafe fn from_usize(data: usize) -> Self {
1314 Shared {
1315 data,
1316 _marker: PhantomData,
1317 }
1318 }
1319}
1320
1321impl<'g, T> Shared<'g, T> {
1322 /// Converts the pointer to a raw pointer (without the tag).
1323 ///
1324 /// # Examples
1325 ///
1326 /// ```
1327 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1328 /// use std::sync::atomic::Ordering::SeqCst;
1329 ///
1330 /// let o = Owned::new(1234);
1331 /// let raw = &*o as *const _;
1332 /// let a = Atomic::from(o);
1333 ///
1334 /// let guard = &epoch::pin();
1335 /// let p = a.load(SeqCst, guard);
1336 /// assert_eq!(p.as_raw(), raw);
1337 /// # unsafe { drop(a.into_owned()); } // avoid leak
1338 /// ```
1339 pub fn as_raw(&self) -> *const T {
1340 let (raw, _) = decompose_tag::<T>(self.data);
1341 raw as *const _
1342 }
1343}
1344
1345impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
1346 /// Returns a new null pointer.
1347 ///
1348 /// # Examples
1349 ///
1350 /// ```
1351 /// use crossbeam_epoch::Shared;
1352 ///
1353 /// let p = Shared::<i32>::null();
1354 /// assert!(p.is_null());
1355 /// ```
1356 pub fn null() -> Shared<'g, T> {
1357 Shared {
1358 data: 0,
1359 _marker: PhantomData,
1360 }
1361 }
1362
1363 /// Returns `true` if the pointer is null.
1364 ///
1365 /// # Examples
1366 ///
1367 /// ```
1368 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1369 /// use std::sync::atomic::Ordering::SeqCst;
1370 ///
1371 /// let a = Atomic::null();
1372 /// let guard = &epoch::pin();
1373 /// assert!(a.load(SeqCst, guard).is_null());
1374 /// a.store(Owned::new(1234), SeqCst);
1375 /// assert!(!a.load(SeqCst, guard).is_null());
1376 /// # unsafe { drop(a.into_owned()); } // avoid leak
1377 /// ```
1378 pub fn is_null(&self) -> bool {
1379 let (raw, _) = decompose_tag::<T>(self.data);
1380 raw == 0
1381 }
1382
1383 /// Dereferences the pointer.
1384 ///
1385 /// Returns a reference to the pointee that is valid during the lifetime `'g`.
1386 ///
1387 /// # Safety
1388 ///
1389 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1390 ///
1391 /// Another concern is the possibility of data races due to lack of proper synchronization.
1392 /// For example, consider the following scenario:
1393 ///
1394 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1395 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1396 ///
1397 /// The problem is that relaxed orderings don't synchronize initialization of the object with
1398 /// the read from the second thread. This is a data race. A possible solution would be to use
1399 /// `Release` and `Acquire` orderings.
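    ///
    /// A minimal sketch of that fix (illustrative; both steps are shown on a single thread here):
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::{Acquire, Release};
    ///
    /// let a = Atomic::null();
    /// // The writer publishes the object with `Release`...
    /// a.store(Owned::new(10), Release);
    /// // ...and the reader observes it with `Acquire`, which synchronizes with that store.
    /// let guard = &epoch::pin();
    /// assert_eq!(unsafe { a.load(Acquire, guard).as_ref() }, Some(&10));
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```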
1400 ///
1401 /// # Examples
1402 ///
1403 /// ```
1404 /// use crossbeam_epoch::{self as epoch, Atomic};
1405 /// use std::sync::atomic::Ordering::SeqCst;
1406 ///
1407 /// let a = Atomic::new(1234);
1408 /// let guard = &epoch::pin();
1409 /// let p = a.load(SeqCst, guard);
1410 /// unsafe {
1411 /// assert_eq!(p.deref(), &1234);
1412 /// }
1413 /// # unsafe { drop(a.into_owned()); } // avoid leak
1414 /// ```
1415 pub unsafe fn deref(&self) -> &'g T {
1416 let (raw, _) = decompose_tag::<T>(self.data);
1417 T::deref(raw)
1418 }
1419
1420 /// Dereferences the pointer.
1421 ///
1422 /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
1423 ///
1424 /// # Safety
1425 ///
1426 /// * There is no guarantee that there are no more threads attempting to read/write from/to the
1427 /// actual object at the same time.
1428 ///
    ///   The user must know that there are no concurrent accesses to the object itself.
1430 ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
1432 ///
1433 /// # Examples
1434 ///
1435 /// ```
1436 /// use crossbeam_epoch::{self as epoch, Atomic};
1437 /// use std::sync::atomic::Ordering::SeqCst;
1438 ///
1439 /// let a = Atomic::new(vec![1, 2, 3, 4]);
1440 /// let guard = &epoch::pin();
1441 ///
1442 /// let mut p = a.load(SeqCst, guard);
1443 /// unsafe {
1444 /// assert!(!p.is_null());
1445 /// let b = p.deref_mut();
1446 /// assert_eq!(b, &vec![1, 2, 3, 4]);
1447 /// b.push(5);
1448 /// assert_eq!(b, &vec![1, 2, 3, 4, 5]);
1449 /// }
1450 ///
1451 /// let p = a.load(SeqCst, guard);
1452 /// unsafe {
1453 /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
1454 /// }
1455 /// # unsafe { drop(a.into_owned()); } // avoid leak
1456 /// ```
1457 pub unsafe fn deref_mut(&mut self) -> &'g mut T {
1458 let (raw, _) = decompose_tag::<T>(self.data);
1459 T::deref_mut(raw)
1460 }
1461
1462 /// Converts the pointer to a reference.
1463 ///
1464 /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
1465 ///
1466 /// # Safety
1467 ///
1468 /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
1469 ///
1470 /// Another concern is the possibility of data races due to lack of proper synchronization.
1471 /// For example, consider the following scenario:
1472 ///
1473 /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
1474 /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
1475 ///
1476 /// The problem is that relaxed orderings don't synchronize initialization of the object with
1477 /// the read from the second thread. This is a data race. A possible solution would be to use
1478 /// `Release` and `Acquire` orderings.
1479 ///
1480 /// # Examples
1481 ///
1482 /// ```
1483 /// use crossbeam_epoch::{self as epoch, Atomic};
1484 /// use std::sync::atomic::Ordering::SeqCst;
1485 ///
1486 /// let a = Atomic::new(1234);
1487 /// let guard = &epoch::pin();
1488 /// let p = a.load(SeqCst, guard);
1489 /// unsafe {
1490 /// assert_eq!(p.as_ref(), Some(&1234));
1491 /// }
1492 /// # unsafe { drop(a.into_owned()); } // avoid leak
1493 /// ```
1494 pub unsafe fn as_ref(&self) -> Option<&'g T> {
1495 let (raw, _) = decompose_tag::<T>(self.data);
1496 if raw == 0 {
1497 None
1498 } else {
1499 Some(T::deref(raw))
1500 }
1501 }
1502
1503 /// Takes ownership of the pointee.
1504 ///
1505 /// # Panics
1506 ///
1507 /// Panics if this pointer is null, but only in debug mode.
1508 ///
1509 /// # Safety
1510 ///
1511 /// This method may be called only if the pointer is valid and nobody else is holding a
1512 /// reference to the same object.
1513 ///
1514 /// # Examples
1515 ///
1516 /// ```
1517 /// use crossbeam_epoch::{self as epoch, Atomic};
1518 /// use std::sync::atomic::Ordering::SeqCst;
1519 ///
1520 /// let a = Atomic::new(1234);
1521 /// unsafe {
1522 /// let guard = &epoch::unprotected();
1523 /// let p = a.load(SeqCst, guard);
1524 /// drop(p.into_owned());
1525 /// }
1526 /// ```
1527 pub unsafe fn into_owned(self) -> Owned<T> {
1528 debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
1529 Owned::from_usize(self.data)
1530 }
1531
1532 /// Takes ownership of the pointee if it is not null.
1533 ///
1534 /// # Safety
1535 ///
1536 /// This method may be called only if the pointer is valid and nobody else is holding a
1537 /// reference to the same object, or if the pointer is null.
1538 ///
1539 /// # Examples
1540 ///
1541 /// ```
1542 /// use crossbeam_epoch::{self as epoch, Atomic};
1543 /// use std::sync::atomic::Ordering::SeqCst;
1544 ///
1545 /// let a = Atomic::new(1234);
1546 /// unsafe {
1547 /// let guard = &epoch::unprotected();
1548 /// let p = a.load(SeqCst, guard);
1549 /// if let Some(x) = p.try_into_owned() {
1550 /// drop(x);
1551 /// }
1552 /// }
1553 /// ```
1554 pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
1555 if self.is_null() {
1556 None
1557 } else {
1558 Some(Owned::from_usize(self.data))
1559 }
1560 }
1561
1562 /// Returns the tag stored within the pointer.
1563 ///
1564 /// # Examples
1565 ///
1566 /// ```
1567 /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
1568 /// use std::sync::atomic::Ordering::SeqCst;
1569 ///
1570 /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
1571 /// let guard = &epoch::pin();
1572 /// let p = a.load(SeqCst, guard);
1573 /// assert_eq!(p.tag(), 2);
1574 /// # unsafe { drop(a.into_owned()); } // avoid leak
1575 /// ```
1576 pub fn tag(&self) -> usize {
1577 let (_, tag) = decompose_tag::<T>(self.data);
1578 tag
1579 }
1580
    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
1583 ///
1584 /// # Examples
1585 ///
1586 /// ```
1587 /// use crossbeam_epoch::{self as epoch, Atomic};
1588 /// use std::sync::atomic::Ordering::SeqCst;
1589 ///
1590 /// let a = Atomic::new(0u64);
1591 /// let guard = &epoch::pin();
1592 /// let p1 = a.load(SeqCst, guard);
1593 /// let p2 = p1.with_tag(2);
1594 ///
1595 /// assert_eq!(p1.tag(), 0);
1596 /// assert_eq!(p2.tag(), 2);
1597 /// assert_eq!(p1.as_raw(), p2.as_raw());
1598 /// # unsafe { drop(a.into_owned()); } // avoid leak
1599 /// ```
1600 pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
1601 unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
1602 }
1603}
1604
1605impl<T> From<*const T> for Shared<'_, T> {
1606 /// Returns a new pointer pointing to `raw`.
1607 ///
1608 /// # Panics
1609 ///
1610 /// Panics if `raw` is not properly aligned.
1611 ///
1612 /// # Examples
1613 ///
1614 /// ```
1615 /// use crossbeam_epoch::Shared;
1616 ///
1617 /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
1618 /// assert!(!p.is_null());
1619 /// # unsafe { drop(p.into_owned()); } // avoid leak
1620 /// ```
1621 fn from(raw: *const T) -> Self {
1622 let raw = raw as usize;
1623 ensure_aligned::<T>(raw);
1624 unsafe { Self::from_usize(raw) }
1625 }
1626}
1627
1628impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
1629 fn eq(&self, other: &Self) -> bool {
1630 self.data == other.data
1631 }
1632}
1633
1634impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}
1635
1636impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
1637 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
1638 self.data.partial_cmp(&other.data)
1639 }
1640}
1641
1642impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
1643 fn cmp(&self, other: &Self) -> cmp::Ordering {
1644 self.data.cmp(&other.data)
1645 }
1646}
1647
1648impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
1649 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1650 let (raw, tag) = decompose_tag::<T>(self.data);
1651
1652 f.debug_struct("Shared")
1653 .field("raw", &raw)
1654 .field("tag", &tag)
1655 .finish()
1656 }
1657}
1658
1659impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
1660 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1661 fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
1662 }
1663}
1664
1665impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
1666 fn default() -> Self {
1667 Shared::null()
1668 }
1669}
1670
1671#[cfg(all(test, not(crossbeam_loom)))]
1672mod tests {
1673 use super::{Owned, Shared};
1674 use std::mem::MaybeUninit;
1675
1676 #[test]
1677 fn valid_tag_i8() {
1678 Shared::<i8>::null().with_tag(0);
1679 }
1680
1681 #[test]
1682 fn valid_tag_i64() {
1683 Shared::<i64>::null().with_tag(7);
1684 }
1685
1686 #[test]
1687 fn const_atomic_null() {
1688 use super::Atomic;
1689 static _U: Atomic<u8> = Atomic::<u8>::null();
1690 }
1691
1692 #[test]
1693 fn array_init() {
1694 let owned = Owned::<[MaybeUninit<usize>]>::init(10);
1695 let arr: &[MaybeUninit<usize>] = &owned;
1696 assert_eq!(arr.len(), 10);
1697 }
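
    // An extra sanity check for the private tag helpers above (illustrative; it only relies on
    // the helpers' current signatures in this module).
    #[test]
    fn compose_decompose_tag() {
        use super::{compose_tag, decompose_tag, low_bits};

        // `u64` is at least 4-byte aligned on the targets these tests run on, so at least the
        // two lowest bits are available for a tag.
        assert!(low_bits::<u64>() >= 0b11);
        let data = 0x1000usize; // a properly aligned (fake) address
        let tagged = compose_tag::<u64>(data, 0b11);
        assert_eq!(decompose_tag::<u64>(tagged), (data, 0b11));
    }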
1698}
1699