1#![stable(feature = "rust1", since = "1.0.0")]
2
3//! Thread-safe reference-counting pointers.
4//!
5//! See the [`Arc<T>`][Arc] documentation for more details.
6//!
7//! **Note**: This module is only available on platforms that support atomic
8//! loads and stores of pointers. This may be detected at compile time using
9//! `#[cfg(target_has_atomic = "ptr")]`.
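//!
//! A minimal illustrative example:
//!
//! ```
//! use std::sync::Arc;
//!
//! let shared = Arc::new(5);
//! let also_shared = Arc::clone(&shared);
//! assert_eq!(*shared, *also_shared);
//! ```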
10
11use core::any::Any;
12use core::borrow;
13use core::cmp::Ordering;
14use core::fmt;
15use core::hash::{Hash, Hasher};
16use core::hint;
17use core::intrinsics::abort;
18#[cfg(not(no_global_oom_handling))]
19use core::iter;
20use core::marker::{PhantomData, Unsize};
21#[cfg(not(no_global_oom_handling))]
22use core::mem::size_of_val;
23use core::mem::{self, align_of_val_raw};
24use core::ops::{CoerceUnsized, Deref, DerefPure, DispatchFromDyn, Receiver};
25use core::panic::{RefUnwindSafe, UnwindSafe};
26use core::pin::Pin;
27use core::ptr::{self, NonNull};
28#[cfg(not(no_global_oom_handling))]
29use core::slice::from_raw_parts_mut;
30use core::sync::atomic;
31use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
32
33#[cfg(not(no_global_oom_handling))]
34use crate::alloc::handle_alloc_error;
35#[cfg(not(no_global_oom_handling))]
36use crate::alloc::WriteCloneIntoRaw;
37use crate::alloc::{AllocError, Allocator, Global, Layout};
38use crate::borrow::{Cow, ToOwned};
39use crate::boxed::Box;
40use crate::rc::is_dangling;
41#[cfg(not(no_global_oom_handling))]
42use crate::string::String;
43#[cfg(not(no_global_oom_handling))]
44use crate::vec::Vec;
45
46#[cfg(test)]
47mod tests;
48
/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might instead cause a `panic` (even without actually exceeding it).
54///
55/// This is a global invariant, and also applies when using a compare-exchange loop.
56///
57/// See comment in `Arc::clone`.
58const MAX_REFCOUNT: usize = (isize::MAX) as usize;
59
/// The error message used when either counter goes above `MAX_REFCOUNT` and we can `panic` safely.
61const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
62
63#[cfg(not(sanitize = "thread"))]
64macro_rules! acquire {
65 ($x:expr) => {
66 atomic::fence(Acquire)
67 };
68}
69
70// ThreadSanitizer does not support memory fences. To avoid false positive
71// reports in Arc / Weak implementation use atomic loads for synchronization
72// instead.
73#[cfg(sanitize = "thread")]
74macro_rules! acquire {
75 ($x:expr) => {
76 $x.load(Acquire)
77 };
78}
79
80/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
81/// Reference Counted'.
82///
83/// The type `Arc<T>` provides shared ownership of a value of type `T`,
84/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
85/// a new `Arc` instance, which points to the same allocation on the heap as the
86/// source `Arc`, while increasing a reference count. When the last `Arc`
87/// pointer to a given allocation is destroyed, the value stored in that allocation (often
88/// referred to as "inner value") is also dropped.
89///
90/// Shared references in Rust disallow mutation by default, and `Arc` is no
91/// exception: you cannot generally obtain a mutable reference to something
92/// inside an `Arc`. If you need to mutate through an `Arc`, use
93/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
94/// types.
95///
96/// **Note**: This type is only available on platforms that support atomic
97/// loads and stores of pointers, which includes all platforms that support
98/// the `std` crate but not all those which only support [`alloc`](crate).
99/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
100///
101/// ## Thread Safety
102///
103/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
104/// counting. This means that it is thread-safe. The disadvantage is that
105/// atomic operations are more expensive than ordinary memory accesses. If you
106/// are not sharing reference-counted allocations between threads, consider using
107/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
108/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
109/// However, a library might choose `Arc<T>` in order to give library consumers
110/// more flexibility.
111///
112/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
113/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
114/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
115/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
116/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
117/// data, but it doesn't add thread safety to its data. Consider
118/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
119/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
120/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
121/// non-atomic operations.
122///
123/// In the end, this means that you may need to pair `Arc<T>` with some sort of
124/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
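///
/// For instance, a minimal sketch of protecting a shared counter with a
/// [`Mutex`][mutex] inside an `Arc`, cloned into several threads:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```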
125///
126/// ## Breaking cycles with `Weak`
127///
128/// The [`downgrade`][downgrade] method can be used to create a non-owning
129/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
130/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
131/// already been dropped. In other words, `Weak` pointers do not keep the value
132/// inside the allocation alive; however, they *do* keep the allocation
133/// (the backing store for the value) alive.
134///
135/// A cycle between `Arc` pointers will never be deallocated. For this reason,
136/// [`Weak`] is used to break cycles. For example, a tree could have
137/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
138/// pointers from children back to their parents.
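///
/// As an illustrative sketch, a child node can point back to its parent
/// through a [`Weak`] pointer without keeping the parent alive:
///
/// ```
/// use std::sync::{Arc, Weak};
///
/// struct Node {
///     parent: Weak<Node>,
///     value: i32,
/// }
///
/// let parent = Arc::new(Node { parent: Weak::new(), value: 1 });
/// let child = Arc::new(Node { parent: Arc::downgrade(&parent), value: 2 });
///
/// // The child can reach its parent while the parent is alive...
/// assert_eq!(child.parent.upgrade().unwrap().value, 1);
/// assert_eq!(child.value, 2);
///
/// // ...but does not prevent the parent's value from being dropped.
/// drop(parent);
/// assert!(child.parent.upgrade().is_none());
/// ```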
139///
140/// # Cloning references
141///
142/// Creating a new reference from an existing reference-counted pointer is done using the
143/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
144///
145/// ```
146/// use std::sync::Arc;
147/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
148/// // The two syntaxes below are equivalent.
149/// let a = foo.clone();
150/// let b = Arc::clone(&foo);
151/// // a, b, and foo are all Arcs that point to the same memory location
152/// ```
153///
154/// ## `Deref` behavior
155///
156/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
157/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
158/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
159/// functions, called using [fully qualified syntax]:
160///
161/// ```
162/// use std::sync::Arc;
163///
164/// let my_arc = Arc::new(());
165/// let my_weak = Arc::downgrade(&my_arc);
166/// ```
167///
168/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
169/// fully qualified syntax. Some people prefer to use fully qualified syntax,
170/// while others prefer using method-call syntax.
171///
172/// ```
173/// use std::sync::Arc;
174///
175/// let arc = Arc::new(());
176/// // Method-call syntax
177/// let arc2 = arc.clone();
178/// // Fully qualified syntax
179/// let arc3 = Arc::clone(&arc);
180/// ```
181///
182/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
183/// already been dropped.
184///
185/// [`Rc<T>`]: crate::rc::Rc
186/// [clone]: Clone::clone
187/// [mutex]: ../../std/sync/struct.Mutex.html
188/// [rwlock]: ../../std/sync/struct.RwLock.html
189/// [atomic]: core::sync::atomic
190/// [downgrade]: Arc::downgrade
191/// [upgrade]: Weak::upgrade
192/// [RefCell\<T>]: core::cell::RefCell
193/// [`RefCell<T>`]: core::cell::RefCell
194/// [`std::sync`]: ../../std/sync/index.html
195/// [`Arc::clone(&from)`]: Arc::clone
196/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
197///
198/// # Examples
199///
200/// Sharing some immutable data between threads:
201///
202// Note that we **do not** run these tests here. The windows builders get super
203// unhappy if a thread outlives the main thread and then exits at the same time
204// (something deadlocks) so we just avoid this entirely by not running these
205// tests.
206/// ```no_run
207/// use std::sync::Arc;
208/// use std::thread;
209///
210/// let five = Arc::new(5);
211///
212/// for _ in 0..10 {
213/// let five = Arc::clone(&five);
214///
215/// thread::spawn(move || {
216/// println!("{five:?}");
217/// });
218/// }
219/// ```
220///
221/// Sharing a mutable [`AtomicUsize`]:
222///
223/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
224///
225/// ```no_run
226/// use std::sync::Arc;
227/// use std::sync::atomic::{AtomicUsize, Ordering};
228/// use std::thread;
229///
230/// let val = Arc::new(AtomicUsize::new(5));
231///
232/// for _ in 0..10 {
233/// let val = Arc::clone(&val);
234///
235/// thread::spawn(move || {
236/// let v = val.fetch_add(1, Ordering::Relaxed);
237/// println!("{v:?}");
238/// });
239/// }
240/// ```
241///
242/// See the [`rc` documentation][rc_examples] for more examples of reference
243/// counting in general.
244///
245/// [rc_examples]: crate::rc#examples
246#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
247#[stable(feature = "rust1", since = "1.0.0")]
248pub struct Arc<
249 T: ?Sized,
250 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
251> {
252 ptr: NonNull<ArcInner<T>>,
253 phantom: PhantomData<ArcInner<T>>,
254 alloc: A,
255}
256
257#[stable(feature = "rust1", since = "1.0.0")]
258unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
259#[stable(feature = "rust1", since = "1.0.0")]
260unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}
261
262#[stable(feature = "catch_unwind", since = "1.9.0")]
263impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}
264
265#[unstable(feature = "coerce_unsized", issue = "18598")]
266impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}
267
268#[unstable(feature = "dispatch_from_dyn", issue = "none")]
269impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
270
271impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
279}
280
281impl<T: ?Sized, A: Allocator> Arc<T, A> {
282 #[inline]
    fn internal_into_inner_with_allocator(self) -> (NonNull<ArcInner<T>>, A) {
        let this = mem::ManuallyDrop::new(self);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
    }
287
288 #[inline]
289 unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
290 Self { ptr, phantom: PhantomData, alloc }
291 }
292
293 #[inline]
294 unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
296 }
297}
298
299/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
300/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
301/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
302///
303/// Since a `Weak` reference does not count towards ownership, it will not
304/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
305/// guarantees about the value still being present. Thus it may return [`None`]
306/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
307/// itself (the backing store) from being deallocated.
308///
309/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
310/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
311/// prevent circular references between [`Arc`] pointers, since mutual owning references
312/// would never allow either [`Arc`] to be dropped. For example, a tree could
313/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
314/// pointers from children back to their parents.
315///
316/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
317///
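/// # Examples
///
/// An illustrative sketch of downgrading and upgrading:
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(42);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong pointer exists, `upgrade` succeeds.
/// assert_eq!(*weak.upgrade().unwrap(), 42);
///
/// // Once the last `Arc` is dropped, the value is dropped too,
/// // and `upgrade` returns `None`.
/// drop(strong);
/// assert!(weak.upgrade().is_none());
/// ```
///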
318/// [`upgrade`]: Weak::upgrade
319#[stable(feature = "arc_weak", since = "1.4.0")]
320#[cfg_attr(not(test), rustc_diagnostic_item = "ArcWeak")]
321pub struct Weak<
322 T: ?Sized,
323 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
324> {
325 // This is a `NonNull` to allow optimizing the size of this type in enums,
326 // but it is not necessarily a valid pointer.
327 // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
328 // to allocate space on the heap. That's not a value a real pointer
    // will ever have because ArcInner has alignment at least 2.
330 // This is only possible when `T: Sized`; unsized `T` never dangle.
331 ptr: NonNull<ArcInner<T>>,
332 alloc: A,
333}
334
335#[stable(feature = "arc_weak", since = "1.4.0")]
336unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
337#[stable(feature = "arc_weak", since = "1.4.0")]
338unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}
339
340#[unstable(feature = "coerce_unsized", issue = "18598")]
341impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
342#[unstable(feature = "dispatch_from_dyn", issue = "none")]
343impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
344
345#[stable(feature = "arc_weak", since = "1.4.0")]
346impl<T: ?Sized> fmt::Debug for Weak<T> {
347 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
348 write!(f, "(Weak)")
349 }
350}
351
352// This is repr(C) to future-proof against possible field-reordering, which
353// would interfere with otherwise safe [into|from]_raw() of transmutable
354// inner types.
355#[repr(C)]
356struct ArcInner<T: ?Sized> {
357 strong: atomic::AtomicUsize,
358
359 // the value usize::MAX acts as a sentinel for temporarily "locking" the
360 // ability to upgrade weak pointers or downgrade strong ones; this is used
361 // to avoid races in `make_mut` and `get_mut`.
362 weak: atomic::AtomicUsize,
363
364 data: T,
365}
366
367/// Calculate layout for `ArcInner<T>` using the inner value's layout
368fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
369 // Calculate layout using the given value layout.
370 // Previously, layout was calculated on the expression
371 // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
372 // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
374}
375
376unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
377unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
378
379impl<T> Arc<T> {
380 /// Constructs a new `Arc<T>`.
381 ///
382 /// # Examples
383 ///
384 /// ```
385 /// use std::sync::Arc;
386 ///
387 /// let five = Arc::new(5);
388 /// ```
389 #[cfg(not(no_global_oom_handling))]
390 #[inline]
391 #[stable(feature = "rust1", since = "1.0.0")]
392 pub fn new(data: T) -> Arc<T> {
393 // Start the weak pointer count as 1 which is the weak pointer that's
394 // held by all the strong pointers (kinda), see std/rc.rs for more info
395 let x: Box<_> = Box::new(ArcInner {
396 strong: atomic::AtomicUsize::new(1),
397 weak: atomic::AtomicUsize::new(1),
398 data,
399 });
400 unsafe { Self::from_inner(Box::leak(x).into()) }
401 }
402
403 /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
404 /// to allow you to construct a `T` which holds a weak pointer to itself.
405 ///
406 /// Generally, a structure circularly referencing itself, either directly or
407 /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
408 /// Using this function, you get access to the weak pointer during the
409 /// initialization of `T`, before the `Arc<T>` is created, such that you can
410 /// clone and store it inside the `T`.
411 ///
412 /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
413 /// then calls your closure, giving it a `Weak<T>` to this allocation,
414 /// and only afterwards completes the construction of the `Arc<T>` by placing
415 /// the `T` returned from your closure into the allocation.
416 ///
417 /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
418 /// returns, calling [`upgrade`] on the weak reference inside your closure will
419 /// fail and result in a `None` value.
420 ///
421 /// # Panics
422 ///
423 /// If `data_fn` panics, the panic is propagated to the caller, and the
424 /// temporary [`Weak<T>`] is dropped normally.
425 ///
426 /// # Example
427 ///
428 /// ```
429 /// # #![allow(dead_code)]
430 /// use std::sync::{Arc, Weak};
431 ///
432 /// struct Gadget {
433 /// me: Weak<Gadget>,
434 /// }
435 ///
436 /// impl Gadget {
437 /// /// Construct a reference counted Gadget.
438 /// fn new() -> Arc<Self> {
439 /// // `me` is a `Weak<Gadget>` pointing at the new allocation of the
440 /// // `Arc` we're constructing.
441 /// Arc::new_cyclic(|me| {
442 /// // Create the actual struct here.
443 /// Gadget { me: me.clone() }
444 /// })
445 /// }
446 ///
447 /// /// Return a reference counted pointer to Self.
448 /// fn me(&self) -> Arc<Self> {
449 /// self.me.upgrade().unwrap()
450 /// }
451 /// }
452 /// ```
453 /// [`upgrade`]: Weak::upgrade
454 #[cfg(not(no_global_oom_handling))]
455 #[inline]
456 #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
457 pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
458 where
459 F: FnOnce(&Weak<T>) -> T,
460 {
461 // Construct the inner in the "uninitialized" state with a single
462 // weak reference.
463 let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
464 strong: atomic::AtomicUsize::new(0),
465 weak: atomic::AtomicUsize::new(1),
466 data: mem::MaybeUninit::<T>::uninit(),
467 }))
468 .into();
469 let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
470
471 let weak = Weak { ptr: init_ptr, alloc: Global };
472
473 // It's important we don't give up ownership of the weak pointer, or
474 // else the memory might be freed by the time `data_fn` returns. If
475 // we really wanted to pass ownership, we could create an additional
476 // weak pointer for ourselves, but this would result in additional
477 // updates to the weak reference count which might not be necessary
478 // otherwise.
479 let data = data_fn(&weak);
480
481 // Now we can properly initialize the inner value and turn our weak
482 // reference into a strong reference.
483 let strong = unsafe {
484 let inner = init_ptr.as_ptr();
485 ptr::write(ptr::addr_of_mut!((*inner).data), data);
486
487 // The above write to the data field must be visible to any threads which
488 // observe a non-zero strong count. Therefore we need at least "Release" ordering
489 // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
490 //
491 // "Acquire" ordering is not required. When considering the possible behaviours
492 // of `data_fn` we only need to look at what it could do with a reference to a
493 // non-upgradeable `Weak`:
494 // - It can *clone* the `Weak`, increasing the weak reference count.
495 // - It can drop those clones, decreasing the weak reference count (but never to zero).
496 //
497 // These side effects do not impact us in any way, and no other side effects are
498 // possible with safe code alone.
499 let prev_value = (*inner).strong.fetch_add(1, Release);
500 debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
501
502 Arc::from_inner(init_ptr)
503 };
504
505 // Strong references should collectively own a shared weak reference,
506 // so don't run the destructor for our old weak reference.
507 mem::forget(weak);
508 strong
509 }
510
511 /// Constructs a new `Arc` with uninitialized contents.
512 ///
513 /// # Examples
514 ///
515 /// ```
516 /// #![feature(new_uninit)]
517 /// #![feature(get_mut_unchecked)]
518 ///
519 /// use std::sync::Arc;
520 ///
521 /// let mut five = Arc::<u32>::new_uninit();
522 ///
523 /// // Deferred initialization:
524 /// Arc::get_mut(&mut five).unwrap().write(5);
525 ///
526 /// let five = unsafe { five.assume_init() };
527 ///
528 /// assert_eq!(*five, 5)
529 /// ```
530 #[cfg(not(no_global_oom_handling))]
531 #[inline]
532 #[unstable(feature = "new_uninit", issue = "63291")]
533 #[must_use]
534 pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
535 unsafe {
536 Arc::from_ptr(Arc::allocate_for_layout(
537 Layout::new::<T>(),
538 |layout| Global.allocate(layout),
539 <*mut u8>::cast,
540 ))
541 }
542 }
543
544 /// Constructs a new `Arc` with uninitialized contents, with the memory
545 /// being filled with `0` bytes.
546 ///
547 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
548 /// of this method.
549 ///
550 /// # Examples
551 ///
552 /// ```
553 /// #![feature(new_uninit)]
554 ///
555 /// use std::sync::Arc;
556 ///
557 /// let zero = Arc::<u32>::new_zeroed();
558 /// let zero = unsafe { zero.assume_init() };
559 ///
560 /// assert_eq!(*zero, 0)
561 /// ```
562 ///
563 /// [zeroed]: mem::MaybeUninit::zeroed
564 #[cfg(not(no_global_oom_handling))]
565 #[inline]
566 #[unstable(feature = "new_uninit", issue = "63291")]
567 #[must_use]
568 pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
569 unsafe {
570 Arc::from_ptr(Arc::allocate_for_layout(
571 Layout::new::<T>(),
572 |layout| Global.allocate_zeroed(layout),
573 <*mut u8>::cast,
574 ))
575 }
576 }
577
578 /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
579 /// `data` will be pinned in memory and unable to be moved.
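    ///
    /// # Examples
    ///
    /// A minimal illustrative example:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```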
580 #[cfg(not(no_global_oom_handling))]
581 #[stable(feature = "pin", since = "1.33.0")]
582 #[must_use]
583 pub fn pin(data: T) -> Pin<Arc<T>> {
584 unsafe { Pin::new_unchecked(Arc::new(data)) }
585 }
586
    /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
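    ///
    /// # Examples
    ///
    /// An illustrative sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    ///
    /// let pinned = Arc::try_pin(5)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```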
588 #[unstable(feature = "allocator_api", issue = "32838")]
589 #[inline]
590 pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
591 unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
592 }
593
594 /// Constructs a new `Arc<T>`, returning an error if allocation fails.
595 ///
596 /// # Examples
597 ///
598 /// ```
599 /// #![feature(allocator_api)]
600 /// use std::sync::Arc;
601 ///
602 /// let five = Arc::try_new(5)?;
603 /// # Ok::<(), std::alloc::AllocError>(())
604 /// ```
605 #[unstable(feature = "allocator_api", issue = "32838")]
606 #[inline]
607 pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
608 // Start the weak pointer count as 1 which is the weak pointer that's
609 // held by all the strong pointers (kinda), see std/rc.rs for more info
610 let x: Box<_> = Box::try_new(ArcInner {
611 strong: atomic::AtomicUsize::new(1),
612 weak: atomic::AtomicUsize::new(1),
613 data,
614 })?;
615 unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
616 }
617
618 /// Constructs a new `Arc` with uninitialized contents, returning an error
619 /// if allocation fails.
620 ///
621 /// # Examples
622 ///
623 /// ```
624 /// #![feature(new_uninit, allocator_api)]
625 /// #![feature(get_mut_unchecked)]
626 ///
627 /// use std::sync::Arc;
628 ///
629 /// let mut five = Arc::<u32>::try_new_uninit()?;
630 ///
631 /// // Deferred initialization:
632 /// Arc::get_mut(&mut five).unwrap().write(5);
633 ///
634 /// let five = unsafe { five.assume_init() };
635 ///
636 /// assert_eq!(*five, 5);
637 /// # Ok::<(), std::alloc::AllocError>(())
638 /// ```
639 #[unstable(feature = "allocator_api", issue = "32838")]
640 // #[unstable(feature = "new_uninit", issue = "63291")]
641 pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
642 unsafe {
643 Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
644 Layout::new::<T>(),
645 |layout| Global.allocate(layout),
646 <*mut u8>::cast,
647 )?))
648 }
649 }
650
651 /// Constructs a new `Arc` with uninitialized contents, with the memory
652 /// being filled with `0` bytes, returning an error if allocation fails.
653 ///
654 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
655 /// of this method.
656 ///
657 /// # Examples
658 ///
659 /// ```
660 /// #![feature(new_uninit, allocator_api)]
661 ///
662 /// use std::sync::Arc;
663 ///
664 /// let zero = Arc::<u32>::try_new_zeroed()?;
665 /// let zero = unsafe { zero.assume_init() };
666 ///
667 /// assert_eq!(*zero, 0);
668 /// # Ok::<(), std::alloc::AllocError>(())
669 /// ```
670 ///
671 /// [zeroed]: mem::MaybeUninit::zeroed
672 #[unstable(feature = "allocator_api", issue = "32838")]
673 // #[unstable(feature = "new_uninit", issue = "63291")]
674 pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
675 unsafe {
676 Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
677 Layout::new::<T>(),
678 |layout| Global.allocate_zeroed(layout),
679 <*mut u8>::cast,
680 )?))
681 }
682 }
683}
684
685impl<T, A: Allocator> Arc<T, A> {
686 /// Returns a reference to the underlying allocator.
687 ///
688 /// Note: this is an associated function, which means that you have
689 /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
690 /// is so that there is no conflict with a method on the inner type.
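    ///
    /// # Examples
    ///
    /// An illustrative sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let five = Arc::new_in(5, System);
    /// let _allocator: &System = Arc::allocator(&five);
    /// ```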
691 #[inline]
692 #[unstable(feature = "allocator_api", issue = "32838")]
693 pub fn allocator(this: &Self) -> &A {
694 &this.alloc
    }

    /// Constructs a new `Arc<T>` in the provided allocator.
697 ///
698 /// # Examples
699 ///
700 /// ```
701 /// #![feature(allocator_api)]
702 ///
703 /// use std::sync::Arc;
704 /// use std::alloc::System;
705 ///
706 /// let five = Arc::new_in(5, System);
707 /// ```
708 #[inline]
709 #[cfg(not(no_global_oom_handling))]
710 #[unstable(feature = "allocator_api", issue = "32838")]
711 pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
712 // Start the weak pointer count as 1 which is the weak pointer that's
713 // held by all the strong pointers (kinda), see std/rc.rs for more info
714 let x = Box::new_in(
715 ArcInner {
716 strong: atomic::AtomicUsize::new(1),
717 weak: atomic::AtomicUsize::new(1),
718 data,
719 },
720 alloc,
721 );
722 let (ptr, alloc) = Box::into_unique(x);
723 unsafe { Self::from_inner_in(ptr.into(), alloc) }
724 }
725
726 /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
727 ///
728 /// # Examples
729 ///
730 /// ```
731 /// #![feature(new_uninit)]
732 /// #![feature(get_mut_unchecked)]
733 /// #![feature(allocator_api)]
734 ///
735 /// use std::sync::Arc;
736 /// use std::alloc::System;
737 ///
738 /// let mut five = Arc::<u32, _>::new_uninit_in(System);
739 ///
740 /// let five = unsafe {
741 /// // Deferred initialization:
742 /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
743 ///
744 /// five.assume_init()
745 /// };
746 ///
747 /// assert_eq!(*five, 5)
748 /// ```
749 #[cfg(not(no_global_oom_handling))]
750 #[unstable(feature = "allocator_api", issue = "32838")]
751 // #[unstable(feature = "new_uninit", issue = "63291")]
752 #[inline]
753 pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
754 unsafe {
755 Arc::from_ptr_in(
756 Arc::allocate_for_layout(
757 Layout::new::<T>(),
758 |layout| alloc.allocate(layout),
759 <*mut u8>::cast,
760 ),
761 alloc,
762 )
763 }
764 }
765
766 /// Constructs a new `Arc` with uninitialized contents, with the memory
767 /// being filled with `0` bytes, in the provided allocator.
768 ///
769 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
770 /// of this method.
771 ///
772 /// # Examples
773 ///
774 /// ```
775 /// #![feature(new_uninit)]
776 /// #![feature(allocator_api)]
777 ///
778 /// use std::sync::Arc;
779 /// use std::alloc::System;
780 ///
781 /// let zero = Arc::<u32, _>::new_zeroed_in(System);
782 /// let zero = unsafe { zero.assume_init() };
783 ///
784 /// assert_eq!(*zero, 0)
785 /// ```
786 ///
787 /// [zeroed]: mem::MaybeUninit::zeroed
788 #[cfg(not(no_global_oom_handling))]
789 #[unstable(feature = "allocator_api", issue = "32838")]
790 // #[unstable(feature = "new_uninit", issue = "63291")]
791 #[inline]
792 pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
793 unsafe {
794 Arc::from_ptr_in(
795 Arc::allocate_for_layout(
796 Layout::new::<T>(),
797 |layout| alloc.allocate_zeroed(layout),
798 <*mut u8>::cast,
799 ),
800 alloc,
801 )
802 }
803 }
804
805 /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
806 /// then `data` will be pinned in memory and unable to be moved.
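    ///
    /// # Examples
    ///
    /// An illustrative sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::pin_in(5, System);
    /// assert_eq!(*pinned, 5);
    /// ```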
807 #[cfg(not(no_global_oom_handling))]
808 #[unstable(feature = "allocator_api", issue = "32838")]
809 #[inline]
810 pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>>
811 where
812 A: 'static,
813 {
814 unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
815 }
816
    /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if
    /// allocation fails.
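    ///
    /// # Examples
    ///
    /// An illustrative sketch (requires the unstable `allocator_api` feature):
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::sync::Arc;
    /// use std::alloc::System;
    ///
    /// let pinned = Arc::try_pin_in(5, System)?;
    /// assert_eq!(*pinned, 5);
    /// # Ok::<(), std::alloc::AllocError>(())
    /// ```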
819 #[inline]
820 #[unstable(feature = "allocator_api", issue = "32838")]
821 pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError>
822 where
823 A: 'static,
824 {
825 unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
826 }
827
828 /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
829 ///
830 /// # Examples
831 ///
832 /// ```
833 /// #![feature(allocator_api)]
834 ///
835 /// use std::sync::Arc;
836 /// use std::alloc::System;
837 ///
838 /// let five = Arc::try_new_in(5, System)?;
839 /// # Ok::<(), std::alloc::AllocError>(())
840 /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
844 pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
845 // Start the weak pointer count as 1 which is the weak pointer that's
846 // held by all the strong pointers (kinda), see std/rc.rs for more info
847 let x = Box::try_new_in(
848 ArcInner {
849 strong: atomic::AtomicUsize::new(1),
850 weak: atomic::AtomicUsize::new(1),
851 data,
852 },
853 alloc,
854 )?;
855 let (ptr, alloc) = Box::into_unique(x);
856 Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
857 }
858
859 /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
860 /// error if allocation fails.
861 ///
862 /// # Examples
863 ///
864 /// ```
865 /// #![feature(new_uninit, allocator_api)]
866 /// #![feature(get_mut_unchecked)]
867 ///
868 /// use std::sync::Arc;
869 /// use std::alloc::System;
870 ///
871 /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
872 ///
873 /// let five = unsafe {
874 /// // Deferred initialization:
875 /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
876 ///
877 /// five.assume_init()
878 /// };
879 ///
880 /// assert_eq!(*five, 5);
881 /// # Ok::<(), std::alloc::AllocError>(())
882 /// ```
883 #[unstable(feature = "allocator_api", issue = "32838")]
884 // #[unstable(feature = "new_uninit", issue = "63291")]
885 #[inline]
886 pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
887 unsafe {
888 Ok(Arc::from_ptr_in(
889 Arc::try_allocate_for_layout(
890 Layout::new::<T>(),
891 |layout| alloc.allocate(layout),
892 <*mut u8>::cast,
893 )?,
894 alloc,
895 ))
896 }
897 }
898
899 /// Constructs a new `Arc` with uninitialized contents, with the memory
900 /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
901 /// fails.
902 ///
903 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
904 /// of this method.
905 ///
906 /// # Examples
907 ///
908 /// ```
909 /// #![feature(new_uninit, allocator_api)]
910 ///
911 /// use std::sync::Arc;
912 /// use std::alloc::System;
913 ///
914 /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
915 /// let zero = unsafe { zero.assume_init() };
916 ///
917 /// assert_eq!(*zero, 0);
918 /// # Ok::<(), std::alloc::AllocError>(())
919 /// ```
920 ///
921 /// [zeroed]: mem::MaybeUninit::zeroed
922 #[unstable(feature = "allocator_api", issue = "32838")]
923 // #[unstable(feature = "new_uninit", issue = "63291")]
924 #[inline]
925 pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
926 unsafe {
927 Ok(Arc::from_ptr_in(
928 Arc::try_allocate_for_layout(
929 Layout::new::<T>(),
930 |layout| alloc.allocate_zeroed(layout),
931 <*mut u8>::cast,
932 )?,
933 alloc,
934 ))
935 }
936 }
937 /// Returns the inner value, if the `Arc` has exactly one strong reference.
938 ///
939 /// Otherwise, an [`Err`] is returned with the same `Arc` that was
940 /// passed in.
941 ///
942 /// This will succeed even if there are outstanding weak references.
943 ///
944 /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
945 /// want to keep the `Arc` in the [`Err`] case.
946 /// Immediately dropping the [`Err`] payload, like in the expression
947 /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
948 /// drop to zero and the inner value of the `Arc` to be dropped:
949 /// For instance if two threads each execute this expression in parallel, then
950 /// there is a race condition. The threads could first both check whether they
951 /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
952 /// both drop their `Arc` in the call to [`ok`][`Result::ok`],
953 /// taking the strong count from two down to zero.
954 ///
955 /// # Examples
956 ///
957 /// ```
958 /// use std::sync::Arc;
959 ///
960 /// let x = Arc::new(3);
961 /// assert_eq!(Arc::try_unwrap(x), Ok(3));
962 ///
963 /// let x = Arc::new(4);
964 /// let _y = Arc::clone(&x);
965 /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
966 /// ```
967 #[inline]
968 #[stable(feature = "arc_unique", since = "1.4.0")]
969 pub fn try_unwrap(this: Self) -> Result<T, Self> {
970 if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
971 return Err(this);
972 }
973
974 acquire!(this.inner().strong);
975
976 unsafe {
977 let elem = ptr::read(&this.ptr.as_ref().data);
978 let alloc = ptr::read(&this.alloc); // copy the allocator
979
980 // Make a weak pointer to clean up the implicit strong-weak reference
981 let _weak = Weak { ptr: this.ptr, alloc };
982 mem::forget(this);
983
984 Ok(elem)
985 }
986 }
987
988 /// Returns the inner value, if the `Arc` has exactly one strong reference.
989 ///
990 /// Otherwise, [`None`] is returned and the `Arc` is dropped.
991 ///
992 /// This will succeed even if there are outstanding weak references.
993 ///
994 /// If `Arc::into_inner` is called on every clone of this `Arc`,
995 /// it is guaranteed that exactly one of the calls returns the inner value.
996 /// This means in particular that the inner value is not dropped.
997 ///
998 /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
999 /// is meant for different use-cases. If used as a direct replacement
1000 /// for `Arc::into_inner` anyway, such as with the expression
1001 /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
1002 /// **not** give the same guarantee as described in the previous paragraph.
1003 /// For more information, see the examples below and read the documentation
1004 /// of [`Arc::try_unwrap`].
1005 ///
1006 /// # Examples
1007 ///
1008 /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
1009 /// ```
1010 /// use std::sync::Arc;
1011 ///
1012 /// let x = Arc::new(3);
1013 /// let y = Arc::clone(&x);
1014 ///
1015 /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
1016 /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
1017 /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
1018 ///
1019 /// let x_inner_value = x_thread.join().unwrap();
1020 /// let y_inner_value = y_thread.join().unwrap();
1021 ///
1022 /// // One of the threads is guaranteed to receive the inner value:
1023 /// assert!(matches!(
1024 /// (x_inner_value, y_inner_value),
1025 /// (None, Some(3)) | (Some(3), None)
1026 /// ));
1027 /// // The result could also be `(None, None)` if the threads called
1028 /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
1029 /// ```
1030 ///
1031 /// A more practical example demonstrating the need for `Arc::into_inner`:
1032 /// ```
1033 /// use std::sync::Arc;
1034 ///
1035 /// // Definition of a simple singly linked list using `Arc`:
1036 /// #[derive(Clone)]
1037 /// struct LinkedList<T>(Option<Arc<Node<T>>>);
1038 /// struct Node<T>(T, Option<Arc<Node<T>>>);
1039 ///
1040 /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
1041 /// // can cause a stack overflow. To prevent this, we can provide a
1042 /// // manual `Drop` implementation that does the destruction in a loop:
1043 /// impl<T> Drop for LinkedList<T> {
1044 /// fn drop(&mut self) {
1045 /// let mut link = self.0.take();
1046 /// while let Some(arc_node) = link.take() {
1047 /// if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
1048 /// link = next;
1049 /// }
1050 /// }
1051 /// }
1052 /// }
1053 ///
1054 /// // Implementation of `new` and `push` omitted
1055 /// impl<T> LinkedList<T> {
1056 /// /* ... */
1057 /// # fn new() -> Self {
1058 /// # LinkedList(None)
1059 /// # }
1060 /// # fn push(&mut self, x: T) {
1061 /// # self.0 = Some(Arc::new(Node(x, self.0.take())));
1062 /// # }
1063 /// }
1064 ///
1065 /// // The following code could have still caused a stack overflow
1066 /// // despite the manual `Drop` impl if that `Drop` impl had used
1067 /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
1068 ///
1069 /// // Create a long list and clone it
1070 /// let mut x = LinkedList::new();
1071 /// let size = 100000;
1072 /// # let size = if cfg!(miri) { 100 } else { size };
1073 /// for i in 0..size {
1074 /// x.push(i); // Adds i to the front of x
1075 /// }
1076 /// let y = x.clone();
1077 ///
1078 /// // Drop the clones in parallel
1079 /// let x_thread = std::thread::spawn(|| drop(x));
1080 /// let y_thread = std::thread::spawn(|| drop(y));
1081 /// x_thread.join().unwrap();
1082 /// y_thread.join().unwrap();
1083 /// ```
1084 #[inline]
1085 #[stable(feature = "arc_into_inner", since = "1.70.0")]
1086 pub fn into_inner(this: Self) -> Option<T> {
1087 // Make sure that the ordinary `Drop` implementation isn’t called as well
1088 let mut this = mem::ManuallyDrop::new(this);
1089
1090 // Following the implementation of `drop` and `drop_slow`
1091 if this.inner().strong.fetch_sub(1, Release) != 1 {
1092 return None;
1093 }
1094
1095 acquire!(this.inner().strong);
1096
1097 // SAFETY: This mirrors the line
1098 //
1099 // unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
1100 //
1101 // in `drop_slow`. Instead of dropping the value behind the pointer,
1102 // it is read and eventually returned; `ptr::read` has the same
1103 // safety conditions as `ptr::drop_in_place`.
1104
1105 let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
1106 let alloc = unsafe { ptr::read(&this.alloc) };
1107
1108 drop(Weak { ptr: this.ptr, alloc });
1109
1110 Some(inner)
1111 }
1112}
1113
1114impl<T> Arc<[T]> {
1115 /// Constructs a new atomically reference-counted slice with uninitialized contents.
1116 ///
1117 /// # Examples
1118 ///
1119 /// ```
1120 /// #![feature(new_uninit)]
1121 /// #![feature(get_mut_unchecked)]
1122 ///
1123 /// use std::sync::Arc;
1124 ///
1125 /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1126 ///
1127 /// // Deferred initialization:
1128 /// let data = Arc::get_mut(&mut values).unwrap();
1129 /// data[0].write(1);
1130 /// data[1].write(2);
1131 /// data[2].write(3);
1132 ///
1133 /// let values = unsafe { values.assume_init() };
1134 ///
1135 /// assert_eq!(*values, [1, 2, 3])
1136 /// ```
1137 #[cfg(not(no_global_oom_handling))]
1138 #[inline]
1139 #[unstable(feature = "new_uninit", issue = "63291")]
1140 #[must_use]
1141 pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1142 unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
1143 }
1144
1145 /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1146 /// filled with `0` bytes.
1147 ///
1148 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1149 /// incorrect usage of this method.
1150 ///
1151 /// # Examples
1152 ///
1153 /// ```
1154 /// #![feature(new_uninit)]
1155 ///
1156 /// use std::sync::Arc;
1157 ///
1158 /// let values = Arc::<[u32]>::new_zeroed_slice(3);
1159 /// let values = unsafe { values.assume_init() };
1160 ///
1161 /// assert_eq!(*values, [0, 0, 0])
1162 /// ```
1163 ///
1164 /// [zeroed]: mem::MaybeUninit::zeroed
1165 #[cfg(not(no_global_oom_handling))]
1166 #[inline]
1167 #[unstable(feature = "new_uninit", issue = "63291")]
1168 #[must_use]
1169 pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1170 unsafe {
1171 Arc::from_ptr(Arc::allocate_for_layout(
1172 Layout::array::<T>(len).unwrap(),
1173 |layout| Global.allocate_zeroed(layout),
1174 |mem| {
1175 ptr::slice_from_raw_parts_mut(mem as *mut T, len)
1176 as *mut ArcInner<[mem::MaybeUninit<T>]>
1177 },
1178 ))
1179 }
1180 }
1181}
1182
1183impl<T, A: Allocator> Arc<[T], A> {
1184 /// Constructs a new atomically reference-counted slice with uninitialized contents in the
1185 /// provided allocator.
1186 ///
1187 /// # Examples
1188 ///
1189 /// ```
1190 /// #![feature(new_uninit)]
1191 /// #![feature(get_mut_unchecked)]
1192 /// #![feature(allocator_api)]
1193 ///
1194 /// use std::sync::Arc;
1195 /// use std::alloc::System;
1196 ///
1197 /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
1198 ///
1199 /// let values = unsafe {
1200 /// // Deferred initialization:
1201 /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
1202 /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
1203 /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
1204 ///
1205 /// values.assume_init()
1206 /// };
1207 ///
1208 /// assert_eq!(*values, [1, 2, 3])
1209 /// ```
1210 #[cfg(not(no_global_oom_handling))]
1211 #[unstable(feature = "new_uninit", issue = "63291")]
1212 #[inline]
1213 pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1214 unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
1215 }
1216
1217 /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1218 /// filled with `0` bytes, in the provided allocator.
1219 ///
1220 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1221 /// incorrect usage of this method.
1222 ///
1223 /// # Examples
1224 ///
1225 /// ```
1226 /// #![feature(new_uninit)]
1227 /// #![feature(allocator_api)]
1228 ///
1229 /// use std::sync::Arc;
1230 /// use std::alloc::System;
1231 ///
1232 /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
1233 /// let values = unsafe { values.assume_init() };
1234 ///
1235 /// assert_eq!(*values, [0, 0, 0])
1236 /// ```
1237 ///
1238 /// [zeroed]: mem::MaybeUninit::zeroed
1239 #[cfg(not(no_global_oom_handling))]
1240 #[unstable(feature = "new_uninit", issue = "63291")]
1241 #[inline]
1242 pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1243 unsafe {
1244 Arc::from_ptr_in(
1245 Arc::allocate_for_layout(
1246 Layout::array::<T>(len).unwrap(),
1247 |layout| alloc.allocate_zeroed(layout),
1248 |mem| {
1249 ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1250 as *mut ArcInner<[mem::MaybeUninit<T>]>
1251 },
1252 ),
1253 alloc,
1254 )
1255 }
1256 }
1257}
1258
1259impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
1260 /// Converts to `Arc<T>`.
1261 ///
1262 /// # Safety
1263 ///
1264 /// As with [`MaybeUninit::assume_init`],
1265 /// it is up to the caller to guarantee that the inner value
1266 /// really is in an initialized state.
1267 /// Calling this when the content is not yet fully initialized
1268 /// causes immediate undefined behavior.
1269 ///
1270 /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1271 ///
1272 /// # Examples
1273 ///
1274 /// ```
1275 /// #![feature(new_uninit)]
1276 /// #![feature(get_mut_unchecked)]
1277 ///
1278 /// use std::sync::Arc;
1279 ///
1280 /// let mut five = Arc::<u32>::new_uninit();
1281 ///
1282 /// // Deferred initialization:
1283 /// Arc::get_mut(&mut five).unwrap().write(5);
1284 ///
1285 /// let five = unsafe { five.assume_init() };
1286 ///
1287 /// assert_eq!(*five, 5)
1288 /// ```
1289 #[unstable(feature = "new_uninit", issue = "63291")]
1290 #[must_use = "`self` will be dropped if the result is not used"]
1291 #[inline]
1292 pub unsafe fn assume_init(self) -> Arc<T, A> {
1293 let (ptr, alloc) = self.internal_into_inner_with_allocator();
1294 unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
1295 }
1296}
1297
1298impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
1299 /// Converts to `Arc<[T]>`.
1300 ///
1301 /// # Safety
1302 ///
1303 /// As with [`MaybeUninit::assume_init`],
1304 /// it is up to the caller to guarantee that the inner value
1305 /// really is in an initialized state.
1306 /// Calling this when the content is not yet fully initialized
1307 /// causes immediate undefined behavior.
1308 ///
1309 /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1310 ///
1311 /// # Examples
1312 ///
1313 /// ```
1314 /// #![feature(new_uninit)]
1315 /// #![feature(get_mut_unchecked)]
1316 ///
1317 /// use std::sync::Arc;
1318 ///
1319 /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1320 ///
1321 /// // Deferred initialization:
1322 /// let data = Arc::get_mut(&mut values).unwrap();
1323 /// data[0].write(1);
1324 /// data[1].write(2);
1325 /// data[2].write(3);
1326 ///
1327 /// let values = unsafe { values.assume_init() };
1328 ///
1329 /// assert_eq!(*values, [1, 2, 3])
1330 /// ```
1331 #[unstable(feature = "new_uninit", issue = "63291")]
1332 #[must_use = "`self` will be dropped if the result is not used"]
1333 #[inline]
1334 pub unsafe fn assume_init(self) -> Arc<[T], A> {
1335 let (ptr, alloc) = self.internal_into_inner_with_allocator();
1336 unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
1337 }
1338}
1339
1340impl<T: ?Sized> Arc<T> {
1341 /// Constructs an `Arc<T>` from a raw pointer.
1342 ///
1343 /// The raw pointer must have been previously returned by a call to
1344 /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
1345 ///
1346 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1347 /// is trivially true if `U` is `T`.
1348 /// * If `U` is unsized, its data pointer must have the same size and
1349 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1350 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1351 /// coercion].
1352 ///
1353 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1354 /// and alignment, this is basically like transmuting references of
1355 /// different types. See [`mem::transmute`][transmute] for more information
1356 /// on what restrictions apply in this case.
1357 ///
1358 /// The user of `from_raw` has to make sure a specific value of `T` is only
1359 /// dropped once.
1360 ///
1361 /// This function is unsafe because improper use may lead to memory unsafety,
1362 /// even if the returned `Arc<T>` is never accessed.
1363 ///
1364 /// [into_raw]: Arc::into_raw
1365 /// [transmute]: core::mem::transmute
1366 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1367 ///
1368 /// # Examples
1369 ///
1370 /// ```
1371 /// use std::sync::Arc;
1372 ///
1373 /// let x = Arc::new("hello".to_owned());
1374 /// let x_ptr = Arc::into_raw(x);
1375 ///
1376 /// unsafe {
1377 /// // Convert back to an `Arc` to prevent leak.
1378 /// let x = Arc::from_raw(x_ptr);
1379 /// assert_eq!(&*x, "hello");
1380 ///
1381 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1382 /// }
1383 ///
1384 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1385 /// ```
1386 ///
1387 /// Convert a slice back into its original array:
1388 ///
1389 /// ```
1390 /// use std::sync::Arc;
1391 ///
1392 /// let x: Arc<[u32]> = Arc::new([1, 2, 3]);
1393 /// let x_ptr: *const [u32] = Arc::into_raw(x);
1394 ///
1395 /// unsafe {
1396 /// let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
1397 /// assert_eq!(&*x, &[1, 2, 3]);
1398 /// }
1399 /// ```
1400 #[inline]
1401 #[stable(feature = "rc_raw", since = "1.17.0")]
1402 pub unsafe fn from_raw(ptr: *const T) -> Self {
1403 unsafe { Arc::from_raw_in(ptr, Global) }
1404 }
1405
1406 /// Increments the strong reference count on the `Arc<T>` associated with the
1407 /// provided pointer by one.
1408 ///
1409 /// # Safety
1410 ///
1411 /// The pointer must have been obtained through `Arc::into_raw`, and the
1412 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1413 /// least 1) for the duration of this method.
1414 ///
1415 /// # Examples
1416 ///
1417 /// ```
1418 /// use std::sync::Arc;
1419 ///
1420 /// let five = Arc::new(5);
1421 ///
1422 /// unsafe {
1423 /// let ptr = Arc::into_raw(five);
1424 /// Arc::increment_strong_count(ptr);
1425 ///
1426 /// // This assertion is deterministic because we haven't shared
1427 /// // the `Arc` between threads.
1428 /// let five = Arc::from_raw(ptr);
1429 /// assert_eq!(2, Arc::strong_count(&five));
1430 /// }
1431 /// ```
1432 #[inline]
1433 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1434 pub unsafe fn increment_strong_count(ptr: *const T) {
1435 unsafe { Arc::increment_strong_count_in(ptr, Global) }
1436 }
1437
1438 /// Decrements the strong reference count on the `Arc<T>` associated with the
1439 /// provided pointer by one.
1440 ///
1441 /// # Safety
1442 ///
1443 /// The pointer must have been obtained through `Arc::into_raw`, and the
1444 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1445 /// least 1) when invoking this method. This method can be used to release the final
1446 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1447 /// released.
1448 ///
1449 /// # Examples
1450 ///
1451 /// ```
1452 /// use std::sync::Arc;
1453 ///
1454 /// let five = Arc::new(5);
1455 ///
1456 /// unsafe {
1457 /// let ptr = Arc::into_raw(five);
1458 /// Arc::increment_strong_count(ptr);
1459 ///
1460 /// // Those assertions are deterministic because we haven't shared
1461 /// // the `Arc` between threads.
1462 /// let five = Arc::from_raw(ptr);
1463 /// assert_eq!(2, Arc::strong_count(&five));
1464 /// Arc::decrement_strong_count(ptr);
1465 /// assert_eq!(1, Arc::strong_count(&five));
1466 /// }
1467 /// ```
1468 #[inline]
1469 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1470 pub unsafe fn decrement_strong_count(ptr: *const T) {
1471 unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1472 }
1473}
1474
1475impl<T: ?Sized, A: Allocator> Arc<T, A> {
1476 /// Consumes the `Arc`, returning the wrapped pointer.
1477 ///
1478 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1479 /// [`Arc::from_raw`].
1480 ///
1481 /// # Examples
1482 ///
1483 /// ```
1484 /// use std::sync::Arc;
1485 ///
1486 /// let x = Arc::new("hello".to_owned());
1487 /// let x_ptr = Arc::into_raw(x);
1488 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1489 /// ```
1490 #[must_use = "losing the pointer will leak memory"]
1491 #[stable(feature = "rc_raw", since = "1.17.0")]
1492 #[rustc_never_returns_null_ptr]
1493 pub fn into_raw(this: Self) -> *const T {
1494 let ptr = Self::as_ptr(&this);
1495 mem::forget(this);
1496 ptr
1497 }
1498
1499 /// Provides a raw pointer to the data.
1500 ///
1501 /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
1502 /// as long as there are strong counts in the `Arc`.
1503 ///
1504 /// # Examples
1505 ///
1506 /// ```
1507 /// use std::sync::Arc;
1508 ///
1509 /// let x = Arc::new("hello".to_owned());
1510 /// let y = Arc::clone(&x);
1511 /// let x_ptr = Arc::as_ptr(&x);
1512 /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1513 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1514 /// ```
1515 #[must_use]
1516 #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1517 #[rustc_never_returns_null_ptr]
1518 pub fn as_ptr(this: &Self) -> *const T {
1519 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1520
1521 // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
1522 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1523 // write through the pointer after the Rc is recovered through `from_raw`.
1524 unsafe { ptr::addr_of_mut!((*ptr).data) }
1525 }
1526
1527 /// Constructs an `Arc<T, A>` from a raw pointer.
1528 ///
1529 /// The raw pointer must have been previously returned by a call to [`Arc<U,
1530 /// A>::into_raw`][into_raw] with the following requirements:
1531 ///
1532 /// * If `U` is sized, it must have the same size and alignment as `T`. This
1533 /// is trivially true if `U` is `T`.
1534 /// * If `U` is unsized, its data pointer must have the same size and
1535 /// alignment as `T`. This is trivially true if `Arc<U>` was constructed
1536 /// through `Arc<T>` and then converted to `Arc<U>` through an [unsized
1537 /// coercion].
1538 ///
1539 /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
1540 /// and alignment, this is basically like transmuting references of
1541 /// different types. See [`mem::transmute`][transmute] for more information
1542 /// on what restrictions apply in this case.
1543 ///
1544 /// The raw pointer must point to a block of memory allocated by `alloc`
1545 ///
1546 /// The user of `from_raw` has to make sure a specific value of `T` is only
1547 /// dropped once.
1548 ///
1549 /// This function is unsafe because improper use may lead to memory unsafety,
1550 /// even if the returned `Arc<T>` is never accessed.
1551 ///
1552 /// [into_raw]: Arc::into_raw
1553 /// [transmute]: core::mem::transmute
1554 /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
1555 ///
1556 /// # Examples
1557 ///
1558 /// ```
1559 /// #![feature(allocator_api)]
1560 ///
1561 /// use std::sync::Arc;
1562 /// use std::alloc::System;
1563 ///
1564 /// let x = Arc::new_in("hello".to_owned(), System);
1565 /// let x_ptr = Arc::into_raw(x);
1566 ///
1567 /// unsafe {
1568 /// // Convert back to an `Arc` to prevent leak.
1569 /// let x = Arc::from_raw_in(x_ptr, System);
1570 /// assert_eq!(&*x, "hello");
1571 ///
1572 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1573 /// }
1574 ///
1575 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1576 /// ```
1577 ///
1578 /// Convert a slice back into its original array:
1579 ///
1580 /// ```
1581 /// #![feature(allocator_api)]
1582 ///
1583 /// use std::sync::Arc;
1584 /// use std::alloc::System;
1585 ///
1586 /// let x: Arc<[u32], _> = Arc::new_in([1, 2, 3], System);
1587 /// let x_ptr: *const [u32] = Arc::into_raw(x);
1588 ///
1589 /// unsafe {
1590 /// let x: Arc<[u32; 3], _> = Arc::from_raw_in(x_ptr.cast::<[u32; 3]>(), System);
1591 /// assert_eq!(&*x, &[1, 2, 3]);
1592 /// }
1593 /// ```
1594 #[inline]
1595 #[unstable(feature = "allocator_api", issue = "32838")]
1596 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1597 unsafe {
1598 let offset = data_offset(ptr);
1599
1600 // Reverse the offset to find the original ArcInner.
1601 let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1602
1603 Self::from_ptr_in(arc_ptr, alloc)
1604 }
1605 }
1606
1607 /// Creates a new [`Weak`] pointer to this allocation.
1608 ///
1609 /// # Examples
1610 ///
1611 /// ```
1612 /// use std::sync::Arc;
1613 ///
1614 /// let five = Arc::new(5);
1615 ///
1616 /// let weak_five = Arc::downgrade(&five);
1617 /// ```
1618 #[must_use = "this returns a new `Weak` pointer, \
1619 without modifying the original `Arc`"]
1620 #[stable(feature = "arc_weak", since = "1.4.0")]
1621 pub fn downgrade(this: &Self) -> Weak<T, A>
1622 where
1623 A: Clone,
1624 {
1625 // This Relaxed is OK because we're checking the value in the CAS
1626 // below.
1627 let mut cur = this.inner().weak.load(Relaxed);
1628
1629 loop {
1630 // check if the weak counter is currently "locked"; if so, spin.
1631 if cur == usize::MAX {
1632 hint::spin_loop();
1633 cur = this.inner().weak.load(Relaxed);
1634 continue;
1635 }
1636
1637 // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1638 assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1639
1640 // NOTE: this code currently ignores the possibility of overflow
1641 // into usize::MAX; in general both Rc and Arc need to be adjusted
1642 // to deal with overflow.
1643
1644 // Unlike with Clone(), we need this to be an Acquire read to
1645 // synchronize with the write coming from `is_unique`, so that the
1646 // events prior to that write happen before this read.
1647 match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1648 Ok(_) => {
1649 // Make sure we do not create a dangling Weak
1650 debug_assert!(!is_dangling(this.ptr.as_ptr()));
1651 return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1652 }
1653 Err(old) => cur = old,
1654 }
1655 }
1656 }
1657
1658 /// Gets the number of [`Weak`] pointers to this allocation.
1659 ///
1660 /// # Safety
1661 ///
1662 /// This method by itself is safe, but using it correctly requires extra care.
1663 /// Another thread can change the weak count at any time,
1664 /// including potentially between calling this method and acting on the result.
1665 ///
1666 /// # Examples
1667 ///
1668 /// ```
1669 /// use std::sync::Arc;
1670 ///
1671 /// let five = Arc::new(5);
1672 /// let _weak_five = Arc::downgrade(&five);
1673 ///
1674 /// // This assertion is deterministic because we haven't shared
1675 /// // the `Arc` or `Weak` between threads.
1676 /// assert_eq!(1, Arc::weak_count(&five));
1677 /// ```
1678 #[inline]
1679 #[must_use]
1680 #[stable(feature = "arc_counts", since = "1.15.0")]
1681 pub fn weak_count(this: &Self) -> usize {
1682 let cnt = this.inner().weak.load(Relaxed);
1683 // If the weak count is currently locked, the value of the
1684 // count was 0 just before taking the lock.
1685 if cnt == usize::MAX { 0 } else { cnt - 1 }
1686 }
1687
1688 /// Gets the number of strong (`Arc`) pointers to this allocation.
1689 ///
1690 /// # Safety
1691 ///
1692 /// This method by itself is safe, but using it correctly requires extra care.
1693 /// Another thread can change the strong count at any time,
1694 /// including potentially between calling this method and acting on the result.
1695 ///
1696 /// # Examples
1697 ///
1698 /// ```
1699 /// use std::sync::Arc;
1700 ///
1701 /// let five = Arc::new(5);
1702 /// let _also_five = Arc::clone(&five);
1703 ///
1704 /// // This assertion is deterministic because we haven't shared
1705 /// // the `Arc` between threads.
1706 /// assert_eq!(2, Arc::strong_count(&five));
1707 /// ```
1708 #[inline]
1709 #[must_use]
1710 #[stable(feature = "arc_counts", since = "1.15.0")]
1711 pub fn strong_count(this: &Self) -> usize {
1712 this.inner().strong.load(Relaxed)
1713 }
1714
1715 /// Increments the strong reference count on the `Arc<T>` associated with the
1716 /// provided pointer by one.
1717 ///
1718 /// # Safety
1719 ///
1720 /// The pointer must have been obtained through `Arc::into_raw`, and the
1721 /// associated `Arc` instance must be valid (i.e. the strong count must be at
    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1723 /// allocated by `alloc`.
1724 ///
1725 /// # Examples
1726 ///
1727 /// ```
1728 /// #![feature(allocator_api)]
1729 ///
1730 /// use std::sync::Arc;
1731 /// use std::alloc::System;
1732 ///
1733 /// let five = Arc::new_in(5, System);
1734 ///
1735 /// unsafe {
1736 /// let ptr = Arc::into_raw(five);
1737 /// Arc::increment_strong_count_in(ptr, System);
1738 ///
1739 /// // This assertion is deterministic because we haven't shared
1740 /// // the `Arc` between threads.
1741 /// let five = Arc::from_raw_in(ptr, System);
1742 /// assert_eq!(2, Arc::strong_count(&five));
1743 /// }
1744 /// ```
1745 #[inline]
1746 #[unstable(feature = "allocator_api", issue = "32838")]
1747 pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1748 where
1749 A: Clone,
1750 {
1751 // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1752 let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1753 // Now increase refcount, but don't drop new refcount either
1754 let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1755 }
1756
1757 /// Decrements the strong reference count on the `Arc<T>` associated with the
1758 /// provided pointer by one.
1759 ///
1760 /// # Safety
1761 ///
1762 /// The pointer must have been obtained through `Arc::into_raw`, the
1763 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1764 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1765 /// allocated by `alloc`. This method can be used to release the final
1766 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1767 /// released.
1768 ///
1769 /// # Examples
1770 ///
1771 /// ```
1772 /// #![feature(allocator_api)]
1773 ///
1774 /// use std::sync::Arc;
1775 /// use std::alloc::System;
1776 ///
1777 /// let five = Arc::new_in(5, System);
1778 ///
1779 /// unsafe {
1780 /// let ptr = Arc::into_raw(five);
1781 /// Arc::increment_strong_count_in(ptr, System);
1782 ///
1783 /// // Those assertions are deterministic because we haven't shared
1784 /// // the `Arc` between threads.
1785 /// let five = Arc::from_raw_in(ptr, System);
1786 /// assert_eq!(2, Arc::strong_count(&five));
1787 /// Arc::decrement_strong_count_in(ptr, System);
1788 /// assert_eq!(1, Arc::strong_count(&five));
1789 /// }
1790 /// ```
1791 #[inline]
1792 #[unstable(feature = "allocator_api", issue = "32838")]
1793 pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1794 unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1795 }
1796
1797 #[inline]
1798 fn inner(&self) -> &ArcInner<T> {
1799 // This unsafety is ok because while this arc is alive we're guaranteed
1800 // that the inner pointer is valid. Furthermore, we know that the
1801 // `ArcInner` structure itself is `Sync` because the inner data is
1802 // `Sync` as well, so we're ok loaning out an immutable pointer to these
1803 // contents.
1804 unsafe { self.ptr.as_ref() }
1805 }
1806
1807 // Non-inlined part of `drop`.
1808 #[inline(never)]
1809 unsafe fn drop_slow(&mut self) {
1810 // Destroy the data at this time, even though we must not free the box
1811 // allocation itself (there might still be weak pointers lying around).
1812 unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
1813
1814 // Drop the weak ref collectively held by all strong references
1815 // Take a reference to `self.alloc` instead of cloning because 1. it'll
1816 // last long enough, and 2. you should be able to drop `Arc`s with
1817 // unclonable allocators
1818 drop(Weak { ptr: self.ptr, alloc: &self.alloc });
1819 }
1820
1821 /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
1822 /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1823 ///
1824 /// # Examples
1825 ///
1826 /// ```
1827 /// use std::sync::Arc;
1828 ///
1829 /// let five = Arc::new(5);
1830 /// let same_five = Arc::clone(&five);
1831 /// let other_five = Arc::new(5);
1832 ///
1833 /// assert!(Arc::ptr_eq(&five, &same_five));
1834 /// assert!(!Arc::ptr_eq(&five, &other_five));
1835 /// ```
1836 ///
1837 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1838 #[inline]
1839 #[must_use]
1840 #[stable(feature = "ptr_eq", since = "1.17.0")]
1841 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1842 ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1843 }
1844}
1845
1846impl<T: ?Sized> Arc<T> {
1847 /// Allocates an `ArcInner<T>` with sufficient space for
1848 /// a possibly-unsized inner value where the value has the layout provided.
1849 ///
1850 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
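    ///
    /// For example, the slice constructors in this module pass a closure along the
    /// lines of `|mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>`
    /// so that the returned pointer carries the slice length as its metadata.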
1852 #[cfg(not(no_global_oom_handling))]
1853 unsafe fn allocate_for_layout(
1854 value_layout: Layout,
1855 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1856 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1857 ) -> *mut ArcInner<T> {
1858 let layout = arcinner_layout_for_value_layout(value_layout);
1859
1860 let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1861
1862 unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1863 }
1864
1865 /// Allocates an `ArcInner<T>` with sufficient space for
1866 /// a possibly-unsized inner value where the value has the layout provided,
1867 /// returning an error if allocation fails.
1868 ///
1869 /// The function `mem_to_arcinner` is called with the data pointer
    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1871 unsafe fn try_allocate_for_layout(
1872 value_layout: Layout,
1873 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1874 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1875 ) -> Result<*mut ArcInner<T>, AllocError> {
1876 let layout = arcinner_layout_for_value_layout(value_layout);
1877
1878 let ptr = allocate(layout)?;
1879
1880 let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
1881
1882 Ok(inner)
1883 }
1884
1885 unsafe fn initialize_arcinner(
1886 ptr: NonNull<[u8]>,
1887 layout: Layout,
1888 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1889 ) -> *mut ArcInner<T> {
1890 let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
1891 debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
1892
1893 unsafe {
1894 ptr::addr_of_mut!((*inner).strong).write(atomic::AtomicUsize::new(1));
1895 ptr::addr_of_mut!((*inner).weak).write(atomic::AtomicUsize::new(1));
1896 }
1897
1898 inner
1899 }
1900}
1901
1902impl<T: ?Sized, A: Allocator> Arc<T, A> {
1903 /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
1904 #[inline]
1905 #[cfg(not(no_global_oom_handling))]
1906 unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
1907 // Allocate for the `ArcInner<T>` using the given value.
1908 unsafe {
1909 Arc::allocate_for_layout(
1910 Layout::for_value_raw(ptr),
1911 |layout| alloc.allocate(layout),
1912 |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
1913 )
1914 }
1915 }
1916
1917 #[cfg(not(no_global_oom_handling))]
1918 fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
1919 unsafe {
1920 let value_size = size_of_val(&*src);
1921 let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
1922
1923 // Copy value as bytes
1924 ptr::copy_nonoverlapping(
1925 core::ptr::addr_of!(*src) as *const u8,
1926 ptr::addr_of_mut!((*ptr).data) as *mut u8,
1927 value_size,
1928 );
1929
1930 // Free the allocation without dropping its contents
1931 let (bptr, alloc) = Box::into_raw_with_allocator(src);
1932 let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
1933 drop(src);
1934
1935 Self::from_ptr_in(ptr, alloc)
1936 }
1937 }
1938}
1939
1940impl<T> Arc<[T]> {
1941 /// Allocates an `ArcInner<[T]>` with the given length.
1942 #[cfg(not(no_global_oom_handling))]
1943 unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
1944 unsafe {
1945 Self::allocate_for_layout(
1946 Layout::array::<T>(len).unwrap(),
1947 |layout| Global.allocate(layout),
1948 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
1949 )
1950 }
1951 }
1952
1953 /// Copy elements from slice into newly allocated `Arc<[T]>`
1954 ///
1955 /// Unsafe because the caller must either take ownership or bind `T: Copy`.
1956 #[cfg(not(no_global_oom_handling))]
1957 unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
1958 unsafe {
1959 let ptr = Self::allocate_for_slice(v.len());
1960
1961 ptr::copy_nonoverlapping(v.as_ptr(), ptr::addr_of_mut!((*ptr).data) as *mut T, v.len());
1962
1963 Self::from_ptr(ptr)
1964 }
1965 }
1966
1967 /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
1968 ///
1969 /// Behavior is undefined should the size be wrong.
1970 #[cfg(not(no_global_oom_handling))]
1971 unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
1972 // Panic guard while cloning T elements.
1973 // In the event of a panic, elements that have been written
1974 // into the new ArcInner will be dropped, then the memory freed.
1975 struct Guard<T> {
1976 mem: NonNull<u8>,
1977 elems: *mut T,
1978 layout: Layout,
1979 n_elems: usize,
1980 }
1981
1982 impl<T> Drop for Guard<T> {
1983 fn drop(&mut self) {
1984 unsafe {
1985 let slice = from_raw_parts_mut(self.elems, self.n_elems);
1986 ptr::drop_in_place(slice);
1987
1988 Global.deallocate(self.mem, self.layout);
1989 }
1990 }
1991 }
1992
1993 unsafe {
1994 let ptr = Self::allocate_for_slice(len);
1995
1996 let mem = ptr as *mut _ as *mut u8;
1997 let layout = Layout::for_value_raw(ptr);
1998
1999 // Pointer to first element
2000 let elems = ptr::addr_of_mut!((*ptr).data) as *mut T;
2001
2002 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
2003
2004 for (i, item) in iter.enumerate() {
2005 ptr::write(elems.add(i), item);
2006 guard.n_elems += 1;
2007 }
2008
2009 // All clear. Forget the guard so it doesn't free the new ArcInner.
2010 mem::forget(guard);
2011
2012 Self::from_ptr(ptr)
2013 }
2014 }
2015}
2016
2017impl<T, A: Allocator> Arc<[T], A> {
2018 /// Allocates an `ArcInner<[T]>` with the given length.
2019 #[inline]
2020 #[cfg(not(no_global_oom_handling))]
2021 unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
2022 unsafe {
2023 Arc::allocate_for_layout(
                Layout::array::<T>(len).unwrap(),
                |layout| alloc.allocate(layout),
                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
2027 )
2028 }
2029 }
2030}
2031
2032/// Specialization trait used for `From<&[T]>`.
2033#[cfg(not(no_global_oom_handling))]
2034trait ArcFromSlice<T> {
2035 fn from_slice(slice: &[T]) -> Self;
2036}
2037
2038#[cfg(not(no_global_oom_handling))]
2039impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
2040 #[inline]
2041 default fn from_slice(v: &[T]) -> Self {
        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2043 }
2044}
2045
2046#[cfg(not(no_global_oom_handling))]
2047impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
2048 #[inline]
2049 fn from_slice(v: &[T]) -> Self {
2050 unsafe { Arc::copy_from_slice(v) }
2051 }
2052}
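
// Illustrative note (usage sketch, not part of this module's API surface): the
// `From<&[T]>` impl below dispatches through this trait, so `T: Copy` element
// types take the `copy_from_slice` fast path, while other `Clone` types go
// element-by-element through `from_iter_exact`. For example:
//
//     let bytes: Arc<[u8]> = Arc::from(&[1u8, 2, 3][..]);              // bitwise copy
//     let names: Arc<[String]> = Arc::from(&[String::from("hi")][..]); // per-element clone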
2053
2054#[stable(feature = "rust1", since = "1.0.0")]
2055impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2056 /// Makes a clone of the `Arc` pointer.
2057 ///
2058 /// This creates another pointer to the same allocation, increasing the
2059 /// strong reference count.
2060 ///
2061 /// # Examples
2062 ///
2063 /// ```
2064 /// use std::sync::Arc;
2065 ///
2066 /// let five = Arc::new(5);
2067 ///
2068 /// let _ = Arc::clone(&five);
2069 /// ```
2070 #[inline]
2071 fn clone(&self) -> Arc<T, A> {
2072 // Using a relaxed ordering is alright here, as knowledge of the
2073 // original reference prevents other threads from erroneously deleting
2074 // the object.
2075 //
        // As explained in the [Boost documentation][1], increasing the
2077 // reference counter can always be done with memory_order_relaxed: New
2078 // references to an object can only be formed from an existing
2079 // reference, and passing an existing reference from one thread to
2080 // another must already provide any required synchronization.
2081 //
2082 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2083 let old_size = self.inner().strong.fetch_add(1, Relaxed);
2084
2085 // However we need to guard against massive refcounts in case someone is `mem::forget`ing
        // Arcs. If we don't do this the count can overflow and users will use-after-free. This
2087 // branch will never be taken in any realistic program. We abort because such a program is
2088 // incredibly degenerate, and we don't care to support it.
2089 //
2090 // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2091 // But we do that check *after* having done the increment, so there is a chance here that
2092 // the worst already happened and we actually do overflow the `usize` counter. However, that
2093 // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2094 // above and the `abort` below, which seems exceedingly unlikely.
2095 //
2096 // This is a global invariant, and also applies when using a compare-exchange loop to increment
2097 // counters in other methods.
2098 // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2099 // and then overflow using a few `fetch_add`s.
2100 if old_size > MAX_REFCOUNT {
2101 abort();
2102 }
2103
2104 unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2105 }
2106}
2107
2108#[stable(feature = "rust1", since = "1.0.0")]
2109impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2110 type Target = T;
2111
2112 #[inline]
2113 fn deref(&self) -> &T {
2114 &self.inner().data
2115 }
2116}
2117
2118#[unstable(feature = "deref_pure_trait", issue = "87121")]
2119unsafe impl<T: ?Sized, A: Allocator> DerefPure for Arc<T, A> {}
2120
2121#[unstable(feature = "receiver_trait", issue = "none")]
2122impl<T: ?Sized> Receiver for Arc<T> {}
2123
2124impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
2125 /// Makes a mutable reference into the given `Arc`.
2126 ///
2127 /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2128 /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
2129 /// referred to as clone-on-write.
2130 ///
2131 /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2132 /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2133 /// be cloned.
2134 ///
2135 /// See also [`get_mut`], which will fail rather than cloning the inner value
2136 /// or dissociating [`Weak`] pointers.
2137 ///
2138 /// [`clone`]: Clone::clone
2139 /// [`get_mut`]: Arc::get_mut
2140 ///
2141 /// # Examples
2142 ///
2143 /// ```
2144 /// use std::sync::Arc;
2145 ///
2146 /// let mut data = Arc::new(5);
2147 ///
2148 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2149 /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2150 /// *Arc::make_mut(&mut data) += 1; // Clones inner data
2151 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2152 /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
2153 ///
2154 /// // Now `data` and `other_data` point to different allocations.
2155 /// assert_eq!(*data, 8);
2156 /// assert_eq!(*other_data, 12);
2157 /// ```
2158 ///
2159 /// [`Weak`] pointers will be dissociated:
2160 ///
2161 /// ```
2162 /// use std::sync::Arc;
2163 ///
2164 /// let mut data = Arc::new(75);
2165 /// let weak = Arc::downgrade(&data);
2166 ///
2167 /// assert!(75 == *data);
2168 /// assert!(75 == *weak.upgrade().unwrap());
2169 ///
2170 /// *Arc::make_mut(&mut data) += 1;
2171 ///
2172 /// assert!(76 == *data);
2173 /// assert!(weak.upgrade().is_none());
2174 /// ```
2175 #[cfg(not(no_global_oom_handling))]
2176 #[inline]
2177 #[stable(feature = "arc_unique", since = "1.4.0")]
2178 pub fn make_mut(this: &mut Self) -> &mut T {
2179 // Note that we hold both a strong reference and a weak reference.
2180 // Thus, releasing our strong reference only will not, by itself, cause
2181 // the memory to be deallocated.
2182 //
2183 // Use Acquire to ensure that we see any writes to `weak` that happen
2184 // before release writes (i.e., decrements) to `strong`. Since we hold a
2185 // weak count, there's no chance the ArcInner itself could be
2186 // deallocated.
2187 if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2188 // Another strong pointer exists, so we must clone.
2189 // Pre-allocate memory to allow writing the cloned value directly.
2190 let mut arc = Self::new_uninit_in(this.alloc.clone());
2191 unsafe {
2192 let data = Arc::get_mut_unchecked(&mut arc);
2193 (**this).write_clone_into_raw(data.as_mut_ptr());
2194 *this = arc.assume_init();
2195 }
2196 } else if this.inner().weak.load(Relaxed) != 1 {
2197 // Relaxed suffices in the above because this is fundamentally an
2198 // optimization: we are always racing with weak pointers being
            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2200
2201 // We removed the last strong ref, but there are additional weak
2202 // refs remaining. We'll move the contents to a new Arc, and
2203 // invalidate the other weak refs.
2204
2205 // Note that it is not possible for the read of `weak` to yield
2206 // usize::MAX (i.e., locked), since the weak count can only be
2207 // locked by a thread with a strong reference.
2208
2209 // Materialize our own implicit weak pointer, so that it can clean
2210 // up the ArcInner as needed.
2211 let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2212
2213 // Can just steal the data, all that's left is Weaks
2214 let mut arc = Self::new_uninit_in(this.alloc.clone());
2215 unsafe {
2216 let data = Arc::get_mut_unchecked(&mut arc);
2217 data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
2218 ptr::write(this, arc.assume_init());
2219 }
2220 } else {
2221 // We were the sole reference of either kind; bump back up the
2222 // strong ref count.
2223 this.inner().strong.store(1, Release);
2224 }
2225
2226 // As with `get_mut()`, the unsafety is ok because our reference was
2227 // either unique to begin with, or became one upon cloning the contents.
2228 unsafe { Self::get_mut_unchecked(this) }
2229 }
2230
2231 /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2232 /// clone.
2233 ///
2234 /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2235 /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2236 ///
2237 /// # Examples
2238 ///
2239 /// ```
2240 /// # use std::{ptr, sync::Arc};
2241 /// let inner = String::from("test");
2242 /// let ptr = inner.as_ptr();
2243 ///
2244 /// let arc = Arc::new(inner);
2245 /// let inner = Arc::unwrap_or_clone(arc);
2246 /// // The inner value was not cloned
2247 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2248 ///
2249 /// let arc = Arc::new(inner);
2250 /// let arc2 = arc.clone();
2251 /// let inner = Arc::unwrap_or_clone(arc);
2252 /// // Because there were 2 references, we had to clone the inner value.
2253 /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2254 /// // `arc2` is the last reference, so when we unwrap it we get back
2255 /// // the original `String`.
2256 /// let inner = Arc::unwrap_or_clone(arc2);
2257 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2258 /// ```
2259 #[inline]
2260 #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2261 pub fn unwrap_or_clone(this: Self) -> T {
2262 Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2263 }
2264}
2265
2266impl<T: ?Sized, A: Allocator> Arc<T, A> {
2267 /// Returns a mutable reference into the given `Arc`, if there are
2268 /// no other `Arc` or [`Weak`] pointers to the same allocation.
2269 ///
2270 /// Returns [`None`] otherwise, because it is not safe to
2271 /// mutate a shared value.
2272 ///
2273 /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2274 /// the inner value when there are other `Arc` pointers.
2275 ///
2276 /// [make_mut]: Arc::make_mut
2277 /// [clone]: Clone::clone
2278 ///
2279 /// # Examples
2280 ///
2281 /// ```
2282 /// use std::sync::Arc;
2283 ///
2284 /// let mut x = Arc::new(3);
2285 /// *Arc::get_mut(&mut x).unwrap() = 4;
2286 /// assert_eq!(*x, 4);
2287 ///
2288 /// let _y = Arc::clone(&x);
2289 /// assert!(Arc::get_mut(&mut x).is_none());
2290 /// ```
2291 #[inline]
2292 #[stable(feature = "arc_unique", since = "1.4.0")]
2293 pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2294 if this.is_unique() {
2295 // This unsafety is ok because we're guaranteed that the pointer
2296 // returned is the *only* pointer that will ever be returned to T. Our
2297 // reference count is guaranteed to be 1 at this point, and we required
2298 // the Arc itself to be `mut`, so we're returning the only possible
2299 // reference to the inner data.
2300 unsafe { Some(Arc::get_mut_unchecked(this)) }
2301 } else {
2302 None
2303 }
2304 }
2305
2306 /// Returns a mutable reference into the given `Arc`,
2307 /// without any check.
2308 ///
2309 /// See also [`get_mut`], which is safe and does appropriate checks.
2310 ///
2311 /// [`get_mut`]: Arc::get_mut
2312 ///
2313 /// # Safety
2314 ///
2315 /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2316 /// they must not be dereferenced or have active borrows for the duration
2317 /// of the returned borrow, and their inner type must be exactly the same as the
    /// inner type of this Arc (including lifetimes). This is trivially the case if no
2319 /// such pointers exist, for example immediately after `Arc::new`.
2320 ///
2321 /// # Examples
2322 ///
2323 /// ```
2324 /// #![feature(get_mut_unchecked)]
2325 ///
2326 /// use std::sync::Arc;
2327 ///
2328 /// let mut x = Arc::new(String::new());
2329 /// unsafe {
2330 /// Arc::get_mut_unchecked(&mut x).push_str("foo")
2331 /// }
2332 /// assert_eq!(*x, "foo");
2333 /// ```
2334 /// Other `Arc` pointers to the same allocation must be to the same type.
2335 /// ```no_run
2336 /// #![feature(get_mut_unchecked)]
2337 ///
2338 /// use std::sync::Arc;
2339 ///
2340 /// let x: Arc<str> = Arc::from("Hello, world!");
2341 /// let mut y: Arc<[u8]> = x.clone().into();
2342 /// unsafe {
2343 /// // this is Undefined Behavior, because x's inner type is str, not [u8]
2344 /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2345 /// }
2346 /// println!("{}", &*x); // Invalid UTF-8 in a str
2347 /// ```
2348 /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2349 /// ```no_run
2350 /// #![feature(get_mut_unchecked)]
2351 ///
2352 /// use std::sync::Arc;
2353 ///
2354 /// let x: Arc<&str> = Arc::new("Hello, world!");
2355 /// {
2356 /// let s = String::from("Oh, no!");
2357 /// let mut y: Arc<&str> = x.clone().into();
2358 /// unsafe {
2359 /// // this is Undefined Behavior, because x's inner type
2360 /// // is &'long str, not &'short str
2361 /// *Arc::get_mut_unchecked(&mut y) = &s;
2362 /// }
2363 /// }
2364 /// println!("{}", &*x); // Use-after-free
2365 /// ```
2366 #[inline]
2367 #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2368 pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2369 // We are careful to *not* create a reference covering the "count" fields, as
2370 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2371 unsafe { &mut (*this.ptr.as_ptr()).data }
2372 }
2373
2374 /// Determine whether this is the unique reference (including weak refs) to
2375 /// the underlying data.
2376 ///
2377 /// Note that this requires locking the weak ref count.
2378 fn is_unique(&mut self) -> bool {
2379 // lock the weak pointer count if we appear to be the sole weak pointer
2380 // holder.
2381 //
2382 // The acquire label here ensures a happens-before relationship with any
2383 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2384 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2385 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2386 if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2387 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2388 // counter in `drop` -- the only access that happens when any but the last reference
2389 // is being dropped.
2390 let unique = self.inner().strong.load(Acquire) == 1;
2391
2392 // The release write here synchronizes with a read in `downgrade`,
2393 // effectively preventing the above read of `strong` from happening
2394 // after the write.
2395 self.inner().weak.store(1, Release); // release the lock
2396 unique
2397 } else {
2398 false
2399 }
2400 }
2401}
2402
2403#[stable(feature = "rust1", since = "1.0.0")]
2404unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2405 /// Drops the `Arc`.
2406 ///
2407 /// This will decrement the strong reference count. If the strong reference
2408 /// count reaches zero then the only other references (if any) are
2409 /// [`Weak`], so we `drop` the inner value.
2410 ///
2411 /// # Examples
2412 ///
2413 /// ```
2414 /// use std::sync::Arc;
2415 ///
2416 /// struct Foo;
2417 ///
2418 /// impl Drop for Foo {
2419 /// fn drop(&mut self) {
2420 /// println!("dropped!");
2421 /// }
2422 /// }
2423 ///
2424 /// let foo = Arc::new(Foo);
2425 /// let foo2 = Arc::clone(&foo);
2426 ///
2427 /// drop(foo); // Doesn't print anything
2428 /// drop(foo2); // Prints "dropped!"
2429 /// ```
2430 #[inline]
2431 fn drop(&mut self) {
2432 // Because `fetch_sub` is already atomic, we do not need to synchronize
2433 // with other threads unless we are going to delete the object. This
2434 // same logic applies to the below `fetch_sub` to the `weak` count.
2435 if self.inner().strong.fetch_sub(1, Release) != 1 {
2436 return;
2437 }
2438
2439 // This fence is needed to prevent reordering of use of the data and
2440 // deletion of the data. Because it is marked `Release`, the decreasing
2441 // of the reference count synchronizes with this `Acquire` fence. This
2442 // means that use of the data happens before decreasing the reference
2443 // count, which happens before this fence, which happens before the
2444 // deletion of the data.
2445 //
2446 // As explained in the [Boost documentation][1],
2447 //
2448 // > It is important to enforce any possible access to the object in one
2449 // > thread (through an existing reference) to *happen before* deleting
2450 // > the object in a different thread. This is achieved by a "release"
2451 // > operation after dropping a reference (any access to the object
2452 // > through this reference must obviously happened before), and an
2453 // > "acquire" operation before deleting the object.
2454 //
2455 // In particular, while the contents of an Arc are usually immutable, it's
2456 // possible to have interior writes to something like a Mutex<T>. Since a
2457 // Mutex is not acquired when it is deleted, we can't rely on its
2458 // synchronization logic to make writes in thread A visible to a destructor
2459 // running in thread B.
2460 //
2461 // Also note that the Acquire fence here could probably be replaced with an
2462 // Acquire load, which could improve performance in highly-contended
2463 // situations. See [2].
2464 //
2465 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2466 // [2]: (https://github.com/rust-lang/rust/pull/41714)
2467 acquire!(self.inner().strong);
2468
2469 unsafe {
2470 self.drop_slow();
2471 }
2472 }
2473}
2474
2475impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2476 /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2477 ///
2478 /// # Examples
2479 ///
2480 /// ```
2481 /// use std::any::Any;
2482 /// use std::sync::Arc;
2483 ///
2484 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2485 /// if let Ok(string) = value.downcast::<String>() {
2486 /// println!("String ({}): {}", string.len(), string);
2487 /// }
2488 /// }
2489 ///
2490 /// let my_string = "Hello World".to_string();
2491 /// print_if_string(Arc::new(my_string));
2492 /// print_if_string(Arc::new(0i8));
2493 /// ```
2494 #[inline]
2495 #[stable(feature = "rc_downcast", since = "1.29.0")]
2496 pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2497 where
2498 T: Any + Send + Sync,
2499 {
2500 if (*self).is::<T>() {
2501 unsafe {
2502 let (ptr, alloc) = self.internal_into_inner_with_allocator();
2503 Ok(Arc::from_inner_in(ptr.cast(), alloc))
2504 }
2505 } else {
2506 Err(self)
2507 }
2508 }
2509
2510 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2511 ///
2512 /// For a safe alternative see [`downcast`].
2513 ///
2514 /// # Examples
2515 ///
2516 /// ```
2517 /// #![feature(downcast_unchecked)]
2518 ///
2519 /// use std::any::Any;
2520 /// use std::sync::Arc;
2521 ///
2522 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2523 ///
2524 /// unsafe {
2525 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2526 /// }
2527 /// ```
2528 ///
2529 /// # Safety
2530 ///
2531 /// The contained value must be of type `T`. Calling this method
2532 /// with the incorrect type is *undefined behavior*.
    ///
2535 /// [`downcast`]: Self::downcast
2536 #[inline]
2537 #[unstable(feature = "downcast_unchecked", issue = "90850")]
2538 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2539 where
2540 T: Any + Send + Sync,
2541 {
2542 unsafe {
2543 let (ptr, alloc) = self.internal_into_inner_with_allocator();
2544 Arc::from_inner_in(ptr.cast(), alloc)
2545 }
2546 }
2547}
2548
2549impl<T> Weak<T> {
2550 /// Constructs a new `Weak<T>`, without allocating any memory.
2551 /// Calling [`upgrade`] on the return value always gives [`None`].
2552 ///
2553 /// [`upgrade`]: Weak::upgrade
2554 ///
2555 /// # Examples
2556 ///
2557 /// ```
2558 /// use std::sync::Weak;
2559 ///
2560 /// let empty: Weak<i64> = Weak::new();
2561 /// assert!(empty.upgrade().is_none());
2562 /// ```
2563 #[inline]
2564 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2565 #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2566 #[must_use]
2567 pub const fn new() -> Weak<T> {
2568 Weak {
2569 ptr: unsafe {
2570 NonNull::new_unchecked(ptr::without_provenance_mut::<ArcInner<T>>(usize::MAX))
2571 },
2572 alloc: Global,
2573 }
2574 }
2575}
2576
2577impl<T, A: Allocator> Weak<T, A> {
2578 /// Constructs a new `Weak<T, A>`, without allocating any memory, technically in the provided
2579 /// allocator.
2580 /// Calling [`upgrade`] on the return value always gives [`None`].
2581 ///
2582 /// [`upgrade`]: Weak::upgrade
2583 ///
2584 /// # Examples
2585 ///
2586 /// ```
2587 /// #![feature(allocator_api)]
2588 ///
2589 /// use std::sync::Weak;
2590 /// use std::alloc::System;
2591 ///
2592 /// let empty: Weak<i64, _> = Weak::new_in(System);
2593 /// assert!(empty.upgrade().is_none());
2594 /// ```
2595 #[inline]
2596 #[unstable(feature = "allocator_api", issue = "32838")]
2597 pub fn new_in(alloc: A) -> Weak<T, A> {
2598 Weak {
2599 ptr: unsafe {
2600 NonNull::new_unchecked(ptr::without_provenance_mut::<ArcInner<T>>(usize::MAX))
2601 },
2602 alloc,
2603 }
2604 }
2605}
2606
2607/// Helper type to allow accessing the reference counts without
2608/// making any assertions about the data field.
2609struct WeakInner<'a> {
2610 weak: &'a atomic::AtomicUsize,
2611 strong: &'a atomic::AtomicUsize,
2612}
2613
2614impl<T: ?Sized> Weak<T> {
2615 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2616 ///
2617 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2618 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2619 ///
2620 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2621 /// as these don't own anything; the method still works on them).
2622 ///
2623 /// # Safety
2624 ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
2626 /// weak reference.
2627 ///
2628 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2629 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2630 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
2633 ///
2634 /// ```
2635 /// use std::sync::{Arc, Weak};
2636 ///
2637 /// let strong = Arc::new("hello".to_owned());
2638 ///
2639 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2640 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2641 ///
2642 /// assert_eq!(2, Arc::weak_count(&strong));
2643 ///
2644 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2645 /// assert_eq!(1, Arc::weak_count(&strong));
2646 ///
2647 /// drop(strong);
2648 ///
2649 /// // Decrement the last weak count.
2650 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2651 /// ```
2652 ///
2653 /// [`new`]: Weak::new
2654 /// [`into_raw`]: Weak::into_raw
2655 /// [`upgrade`]: Weak::upgrade
2656 #[inline]
2657 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2658 pub unsafe fn from_raw(ptr: *const T) -> Self {
2659 unsafe { Weak::from_raw_in(ptr, Global) }
2660 }
2661}
2662
2663impl<T: ?Sized, A: Allocator> Weak<T, A> {
2664 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2665 ///
2666 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2667 /// unaligned or even [`null`] otherwise.
2668 ///
2669 /// # Examples
2670 ///
2671 /// ```
2672 /// use std::sync::Arc;
2673 /// use std::ptr;
2674 ///
2675 /// let strong = Arc::new("hello".to_owned());
2676 /// let weak = Arc::downgrade(&strong);
2677 /// // Both point to the same object
2678 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2679 /// // The strong here keeps it alive, so we can still access the object.
2680 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2681 ///
2682 /// drop(strong);
2683 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2684 /// // undefined behaviour.
2685 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2686 /// ```
2687 ///
2688 /// [`null`]: core::ptr::null "ptr::null"
2689 #[must_use]
2690 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2691 pub fn as_ptr(&self) -> *const T {
2692 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2693
2694 if is_dangling(ptr) {
2695 // If the pointer is dangling, we return the sentinel directly. This cannot be
2696 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2697 ptr as *const T
2698 } else {
2699 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2700 // The payload may be dropped at this point, and we have to maintain provenance,
2701 // so use raw pointer manipulation.
2702 unsafe { ptr::addr_of_mut!((*ptr).data) }
2703 }
2704 }
2705
2706 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2707 ///
2708 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2709 /// one weak reference (the weak count is not modified by this operation). It can be turned
2710 /// back into the `Weak<T>` with [`from_raw`].
2711 ///
2712 /// The same restrictions of accessing the target of the pointer as with
2713 /// [`as_ptr`] apply.
2714 ///
2715 /// # Examples
2716 ///
2717 /// ```
2718 /// use std::sync::{Arc, Weak};
2719 ///
2720 /// let strong = Arc::new("hello".to_owned());
2721 /// let weak = Arc::downgrade(&strong);
2722 /// let raw = weak.into_raw();
2723 ///
2724 /// assert_eq!(1, Arc::weak_count(&strong));
2725 /// assert_eq!("hello", unsafe { &*raw });
2726 ///
2727 /// drop(unsafe { Weak::from_raw(raw) });
2728 /// assert_eq!(0, Arc::weak_count(&strong));
2729 /// ```
2730 ///
2731 /// [`from_raw`]: Weak::from_raw
2732 /// [`as_ptr`]: Weak::as_ptr
2733 #[must_use = "losing the pointer will leak memory"]
2734 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2735 pub fn into_raw(self) -> *const T {
2736 let result = self.as_ptr();
2737 mem::forget(self);
2738 result
2739 }
2740
2741 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2742 /// allocator.
2743 ///
2744 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2745 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2746 ///
2747 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2748 /// as these don't own anything; the method still works on them).
2749 ///
2750 /// # Safety
2751 ///
    /// The pointer must have originated from [`into_raw`] and must still own its potential
2753 /// weak reference, and must point to a block of memory allocated by `alloc`.
2754 ///
2755 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2756 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2757 /// count is not modified by this operation) and therefore it must be paired with a previous
    /// call to [`into_raw`].
    ///
    /// # Examples
2760 ///
2761 /// ```
2762 /// use std::sync::{Arc, Weak};
2763 ///
2764 /// let strong = Arc::new("hello".to_owned());
2765 ///
2766 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2767 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2768 ///
2769 /// assert_eq!(2, Arc::weak_count(&strong));
2770 ///
2771 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2772 /// assert_eq!(1, Arc::weak_count(&strong));
2773 ///
2774 /// drop(strong);
2775 ///
2776 /// // Decrement the last weak count.
2777 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2778 /// ```
2779 ///
2780 /// [`new`]: Weak::new
2781 /// [`into_raw`]: Weak::into_raw
2782 /// [`upgrade`]: Weak::upgrade
2783 #[inline]
2784 #[unstable(feature = "allocator_api", issue = "32838")]
2785 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
2786 // See Weak::as_ptr for context on how the input pointer is derived.
2787
2788 let ptr = if is_dangling(ptr) {
2789 // This is a dangling Weak.
2790 ptr as *mut ArcInner<T>
2791 } else {
2792 // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
2793 // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
2794 let offset = unsafe { data_offset(ptr) };
            // Thus, we reverse the offset to get the whole ArcInner.
2796 // SAFETY: the pointer originated from a Weak, so this offset is safe.
2797 unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
2798 };
2799
2800 // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
2801 Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
2802 }
2803}
2804
2805impl<T: ?Sized, A: Allocator> Weak<T, A> {
2806 /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
2807 /// dropping of the inner value if successful.
2808 ///
2809 /// Returns [`None`] if the inner value has since been dropped.
2810 ///
2811 /// # Examples
2812 ///
2813 /// ```
2814 /// use std::sync::Arc;
2815 ///
2816 /// let five = Arc::new(5);
2817 ///
2818 /// let weak_five = Arc::downgrade(&five);
2819 ///
2820 /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
2821 /// assert!(strong_five.is_some());
2822 ///
2823 /// // Destroy all strong pointers.
2824 /// drop(strong_five);
2825 /// drop(five);
2826 ///
2827 /// assert!(weak_five.upgrade().is_none());
2828 /// ```
2829 #[must_use = "this returns a new `Arc`, \
2830 without modifying the original weak pointer"]
2831 #[stable(feature = "arc_weak", since = "1.4.0")]
2832 pub fn upgrade(&self) -> Option<Arc<T, A>>
2833 where
2834 A: Clone,
2835 {
2836 #[inline]
2837 fn checked_increment(n: usize) -> Option<usize> {
            // Any write of 0 we can observe leaves the field in a permanently zero state.
2839 if n == 0 {
2840 return None;
2841 }
2842 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
2843 assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
2844 Some(n + 1)
2845 }
2846
2847 // We use a CAS loop to increment the strong count instead of a
2848 // fetch_add as this function should never take the reference count
2849 // from zero to one.
2850 //
2851 // Relaxed is fine for the failure case because we don't have any expectations about the new state.
        // Acquire is necessary for the success case to synchronize with `Arc::new_cyclic`, when the inner
2853 // value can be initialized after `Weak` references have already been created. In that case, we
2854 // expect to observe the fully initialized value.
2855 if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
2856 // SAFETY: pointer is not null, verified in checked_increment
2857 unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
2858 } else {
2859 None
2860 }
2861 }
2862
2863 /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
2864 ///
2865 /// If `self` was created using [`Weak::new`], this will return 0.
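    ///
    /// # Examples
    ///
    /// A small illustration of the behavior described above (single-threaded, so the
    /// count is exact):
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// let also_five = Arc::clone(&five);
    /// assert_eq!(2, weak_five.strong_count());
    ///
    /// // Once all strong pointers are gone, the count reads as 0.
    /// drop(five);
    /// drop(also_five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```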
2866 #[must_use]
2867 #[stable(feature = "weak_counts", since = "1.41.0")]
2868 pub fn strong_count(&self) -> usize {
2869 if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
2870 }
2871
2872 /// Gets an approximation of the number of `Weak` pointers pointing to this
2873 /// allocation.
2874 ///
2875 /// If `self` was created using [`Weak::new`], or if there are no remaining
2876 /// strong pointers, this will return 0.
2877 ///
2878 /// # Accuracy
2879 ///
2880 /// Due to implementation details, the returned value can be off by 1 in
2881 /// either direction when other threads are manipulating any `Arc`s or
2882 /// `Weak`s pointing to the same allocation.
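    ///
    /// # Examples
    ///
    /// A sketch of the common cases (single-threaded here, so the counts are exact):
    ///
    /// ```
    /// use std::sync::{Arc, Weak};
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let also_weak = Weak::clone(&weak_five);
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// // Once the last strong pointer is gone, the weak count reads as 0.
    /// drop(also_weak);
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```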
2883 #[must_use]
2884 #[stable(feature = "weak_counts", since = "1.41.0")]
2885 pub fn weak_count(&self) -> usize {
2886 if let Some(inner) = self.inner() {
2887 let weak = inner.weak.load(Acquire);
2888 let strong = inner.strong.load(Relaxed);
2889 if strong == 0 {
2890 0
2891 } else {
2892 // Since we observed that there was at least one strong pointer
2893 // after reading the weak count, we know that the implicit weak
2894 // reference (present whenever any strong references are alive)
2895 // was still around when we observed the weak count, and can
2896 // therefore safely subtract it.
2897 weak - 1
2898 }
2899 } else {
2900 0
2901 }
2902 }
2903
    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
2905 /// (i.e., when this `Weak` was created by `Weak::new`).
2906 #[inline]
2907 fn inner(&self) -> Option<WeakInner<'_>> {
2908 let ptr = self.ptr.as_ptr();
2909 if is_dangling(ptr) {
2910 None
2911 } else {
2912 // We are careful to *not* create a reference covering the "data" field, as
2913 // the field may be mutated concurrently (for example, if the last `Arc`
2914 // is dropped, the data field will be dropped in-place).
2915 Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
2916 }
2917 }
2918
2919 /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
2920 /// both don't point to any allocation (because they were created with `Weak::new()`). However,
2921 /// this function ignores the metadata of `dyn Trait` pointers.
2922 ///
2923 /// # Notes
2924 ///
    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will
    /// compare equal to each other, even though they don't point to any allocation.
2927 ///
2928 /// # Examples
2929 ///
2930 /// ```
2931 /// use std::sync::Arc;
2932 ///
2933 /// let first_rc = Arc::new(5);
2934 /// let first = Arc::downgrade(&first_rc);
2935 /// let second = Arc::downgrade(&first_rc);
2936 ///
2937 /// assert!(first.ptr_eq(&second));
2938 ///
2939 /// let third_rc = Arc::new(5);
2940 /// let third = Arc::downgrade(&third_rc);
2941 ///
2942 /// assert!(!first.ptr_eq(&third));
2943 /// ```
2944 ///
2945 /// Comparing `Weak::new`.
2946 ///
2947 /// ```
2948 /// use std::sync::{Arc, Weak};
2949 ///
2950 /// let first = Weak::new();
2951 /// let second = Weak::new();
2952 /// assert!(first.ptr_eq(&second));
2953 ///
2954 /// let third_rc = Arc::new(());
2955 /// let third = Arc::downgrade(&third_rc);
2956 /// assert!(!first.ptr_eq(&third));
2957 /// ```
2958 ///
2959 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2960 #[inline]
2961 #[must_use]
2962 #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
2963 pub fn ptr_eq(&self, other: &Self) -> bool {
2964 ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
2965 }
2966}
2967
2968#[stable(feature = "arc_weak", since = "1.4.0")]
2969impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
2970 /// Makes a clone of the `Weak` pointer that points to the same allocation.
2971 ///
2972 /// # Examples
2973 ///
2974 /// ```
2975 /// use std::sync::{Arc, Weak};
2976 ///
2977 /// let weak_five = Arc::downgrade(&Arc::new(5));
2978 ///
2979 /// let _ = Weak::clone(&weak_five);
2980 /// ```
2981 #[inline]
2982 fn clone(&self) -> Weak<T, A> {
2983 if let Some(inner) = self.inner() {
2984 // See comments in Arc::clone() for why this is relaxed. This can use a
2985 // fetch_add (ignoring the lock) because the weak count is only locked
            // when there are *no other* weak pointers in existence. (So we can't be
2987 // running this code in that case).
2988 let old_size = inner.weak.fetch_add(1, Relaxed);
2989
2990 // See comments in Arc::clone() for why we do this (for mem::forget).
2991 if old_size > MAX_REFCOUNT {
2992 abort();
2993 }
2994 }
2995
2996 Weak { ptr: self.ptr, alloc: self.alloc.clone() }
2997 }
2998}
2999
3000#[stable(feature = "downgraded_weak", since = "1.10.0")]
3001impl<T> Default for Weak<T> {
3002 /// Constructs a new `Weak<T>`, without allocating memory.
3003 /// Calling [`upgrade`] on the return value always
3004 /// gives [`None`].
3005 ///
3006 /// [`upgrade`]: Weak::upgrade
3007 ///
3008 /// # Examples
3009 ///
3010 /// ```
3011 /// use std::sync::Weak;
3012 ///
3013 /// let empty: Weak<i64> = Default::default();
3014 /// assert!(empty.upgrade().is_none());
3015 /// ```
3016 fn default() -> Weak<T> {
3017 Weak::new()
3018 }
3019}
3020
3021#[stable(feature = "arc_weak", since = "1.4.0")]
3022unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
3023 /// Drops the `Weak` pointer.
3024 ///
3025 /// # Examples
3026 ///
3027 /// ```
3028 /// use std::sync::{Arc, Weak};
3029 ///
3030 /// struct Foo;
3031 ///
3032 /// impl Drop for Foo {
3033 /// fn drop(&mut self) {
3034 /// println!("dropped!");
3035 /// }
3036 /// }
3037 ///
3038 /// let foo = Arc::new(Foo);
3039 /// let weak_foo = Arc::downgrade(&foo);
3040 /// let other_weak_foo = Weak::clone(&weak_foo);
3041 ///
3042 /// drop(weak_foo); // Doesn't print anything
3043 /// drop(foo); // Prints "dropped!"
3044 ///
3045 /// assert!(other_weak_foo.upgrade().is_none());
3046 /// ```
3047 fn drop(&mut self) {
        // If we find out that we were the last weak pointer, then it's time to
3049 // deallocate the data entirely. See the discussion in Arc::drop() about
3050 // the memory orderings
3051 //
3052 // It's not necessary to check for the locked state here, because the
3053 // weak count can only be locked if there was precisely one weak ref,
3054 // meaning that drop could only subsequently run ON that remaining weak
3055 // ref, which can only happen after the lock is released.
3056 let inner = if let Some(inner) = self.inner() { inner } else { return };
3057
3058 if inner.weak.fetch_sub(1, Release) == 1 {
3059 acquire!(inner.weak);
3060 unsafe {
3061 self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
3062 }
3063 }
3064 }
3065}
3066
3067#[stable(feature = "rust1", since = "1.0.0")]
3068trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3069 fn eq(&self, other: &Arc<T, A>) -> bool;
3070 fn ne(&self, other: &Arc<T, A>) -> bool;
3071}
3072
3073#[stable(feature = "rust1", since = "1.0.0")]
3074impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3075 #[inline]
3076 default fn eq(&self, other: &Arc<T, A>) -> bool {
3077 **self == **other
3078 }
3079 #[inline]
3080 default fn ne(&self, other: &Arc<T, A>) -> bool {
3081 **self != **other
3082 }
3083}
3084
3085/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
3086/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
3087/// store large values, that are slow to clone, but also heavy to check for equality, causing this
/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
/// the same value than two `&T`s.
3090///
3091/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
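///
/// For instance, `f64` implements `PartialEq` but not `Eq`, and `f64::NAN != f64::NAN`;
/// an `Arc<f64>` holding a NaN must not compare equal to itself merely because both
/// sides point to the same allocation, so such types keep the value-based comparison
/// from the default impl above.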
3092#[stable(feature = "rust1", since = "1.0.0")]
3093impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3094 #[inline]
3095 fn eq(&self, other: &Arc<T, A>) -> bool {
3096 Arc::ptr_eq(self, other) || **self == **other
3097 }
3098
3099 #[inline]
3100 fn ne(&self, other: &Arc<T, A>) -> bool {
3101 !Arc::ptr_eq(self, other) && **self != **other
3102 }
3103}
3104
3105#[stable(feature = "rust1", since = "1.0.0")]
3106impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3107 /// Equality for two `Arc`s.
3108 ///
3109 /// Two `Arc`s are equal if their inner values are equal, even if they are
    /// stored in different allocations.
3111 ///
3112 /// If `T` also implements `Eq` (implying reflexivity of equality),
3113 /// two `Arc`s that point to the same allocation are always equal.
3114 ///
3115 /// # Examples
3116 ///
3117 /// ```
3118 /// use std::sync::Arc;
3119 ///
3120 /// let five = Arc::new(5);
3121 ///
3122 /// assert!(five == Arc::new(5));
3123 /// ```
3124 #[inline]
3125 fn eq(&self, other: &Arc<T, A>) -> bool {
3126 ArcEqIdent::eq(self, other)
3127 }
3128
3129 /// Inequality for two `Arc`s.
3130 ///
3131 /// Two `Arc`s are not equal if their inner values are not equal.
3132 ///
3133 /// If `T` also implements `Eq` (implying reflexivity of equality),
    /// two `Arc`s that point to the same allocation are never unequal.
3135 ///
3136 /// # Examples
3137 ///
3138 /// ```
3139 /// use std::sync::Arc;
3140 ///
3141 /// let five = Arc::new(5);
3142 ///
3143 /// assert!(five != Arc::new(6));
3144 /// ```
3145 #[inline]
3146 fn ne(&self, other: &Arc<T, A>) -> bool {
3147 ArcEqIdent::ne(self, other)
3148 }
3149}
3150
3151#[stable(feature = "rust1", since = "1.0.0")]
3152impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3153 /// Partial comparison for two `Arc`s.
3154 ///
3155 /// The two are compared by calling `partial_cmp()` on their inner values.
3156 ///
3157 /// # Examples
3158 ///
3159 /// ```
3160 /// use std::sync::Arc;
3161 /// use std::cmp::Ordering;
3162 ///
3163 /// let five = Arc::new(5);
3164 ///
3165 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3166 /// ```
3167 fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3168 (**self).partial_cmp(&**other)
3169 }
3170
3171 /// Less-than comparison for two `Arc`s.
3172 ///
3173 /// The two are compared by calling `<` on their inner values.
3174 ///
3175 /// # Examples
3176 ///
3177 /// ```
3178 /// use std::sync::Arc;
3179 ///
3180 /// let five = Arc::new(5);
3181 ///
3182 /// assert!(five < Arc::new(6));
3183 /// ```
3184 fn lt(&self, other: &Arc<T, A>) -> bool {
3185 *(*self) < *(*other)
3186 }
3187
3188 /// 'Less than or equal to' comparison for two `Arc`s.
3189 ///
3190 /// The two are compared by calling `<=` on their inner values.
3191 ///
3192 /// # Examples
3193 ///
3194 /// ```
3195 /// use std::sync::Arc;
3196 ///
3197 /// let five = Arc::new(5);
3198 ///
3199 /// assert!(five <= Arc::new(5));
3200 /// ```
3201 fn le(&self, other: &Arc<T, A>) -> bool {
3202 *(*self) <= *(*other)
3203 }
3204
3205 /// Greater-than comparison for two `Arc`s.
3206 ///
3207 /// The two are compared by calling `>` on their inner values.
3208 ///
3209 /// # Examples
3210 ///
3211 /// ```
3212 /// use std::sync::Arc;
3213 ///
3214 /// let five = Arc::new(5);
3215 ///
3216 /// assert!(five > Arc::new(4));
3217 /// ```
3218 fn gt(&self, other: &Arc<T, A>) -> bool {
3219 *(*self) > *(*other)
3220 }
3221
3222 /// 'Greater than or equal to' comparison for two `Arc`s.
3223 ///
3224 /// The two are compared by calling `>=` on their inner values.
3225 ///
3226 /// # Examples
3227 ///
3228 /// ```
3229 /// use std::sync::Arc;
3230 ///
3231 /// let five = Arc::new(5);
3232 ///
3233 /// assert!(five >= Arc::new(5));
3234 /// ```
3235 fn ge(&self, other: &Arc<T, A>) -> bool {
3236 *(*self) >= *(*other)
3237 }
3238}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
    /// Comparison for two `Arc`s.
    ///
    /// The two are compared by calling `cmp()` on their inner values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use std::cmp::Ordering;
    ///
    /// let five = Arc::new(5);
    ///
    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
    /// ```
    fn cmp(&self, other: &Arc<T, A>) -> Ordering {
        (**self).cmp(&**other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
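    /// Formats the address of the shared allocation. Minimal usage sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// // Prints the allocation's address, e.g. `0x55a3d4e0b9d0` (the value varies per run).
    /// println!("{five:p}");
    /// ```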
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&core::ptr::addr_of!(**self), f)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Arc<T> {
    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let x: Arc<i32> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`
    ///
    /// The conversion moves the value into a
    /// newly allocated `Arc`. It is equivalent to
    /// calling `Arc::new(t)`.
    ///
    /// # Example
    /// ```rust
    /// # use std::sync::Arc;
    /// let x = 5;
    /// let arc = Arc::new(5);
    ///
    /// assert_eq!(Arc::from(x), arc);
    /// ```
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_array", since = "1.74.0")]
impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
    ///
    /// The conversion moves the array into a newly allocated `Arc`.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: [i32; 3] = [1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: [T; N]) -> Arc<[T]> {
        Arc::<[T; N]>::from(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: &[i32] = &[1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    /// Allocate a reference-counted `str` and copy `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let shared: Arc<str> = Arc::from("eggplant");
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    /// Allocate a reference-counted `str` and copy `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: String = "eggplant".to_owned();
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Move a boxed object to a new, reference-counted allocation.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Box<str> = Box::from("eggplant");
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: Box<T, A>) -> Arc<T, A> {
        Arc::from_box_in(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
    /// Allocate a reference-counted slice and move `v`'s items into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Vec<i32> = vec![1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(unique);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: Vec<T, A>) -> Arc<[T], A> {
        unsafe {
            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();

            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
            ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).data) as *mut T, len);

            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
            // without dropping its contents or the allocator.
            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);

            Self::from_ptr_in(rc_ptr, alloc)
        }
    }
}

#[stable(feature = "shared_from_cow", since = "1.45.0")]
impl<'a, B> From<Cow<'a, B>> for Arc<B>
where
    B: ToOwned + ?Sized,
    Arc<B>: From<&'a B> + From<B::Owned>,
{
    /// Create an atomically reference-counted pointer from
    /// a clone-on-write pointer by copying its content.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use std::borrow::Cow;
    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
    /// let shared: Arc<str> = Arc::from(cow);
    /// assert_eq!("eggplant", &shared[..]);
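    ///
    /// // Illustrative addition: a `Cow::Owned` value converts via `From<String>`.
    /// let owned: Cow<'_, str> = Cow::Owned(String::from("tomato"));
    /// let shared_owned: Arc<str> = Arc::from(owned);
    /// assert_eq!("tomato", &shared_owned[..]);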
    /// ```
    #[inline]
    fn from(cow: Cow<'a, B>) -> Arc<B> {
        match cow {
            Cow::Borrowed(s) => Arc::from(s),
            Cow::Owned(s) => Arc::from(s),
        }
    }
}

#[stable(feature = "shared_from_str", since = "1.62.0")]
impl From<Arc<str>> for Arc<[u8]> {
    /// Converts an atomically reference-counted string slice into a byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let string: Arc<str> = Arc::from("eggplant");
    /// let bytes: Arc<[u8]> = Arc::from(string);
    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
    /// ```
    #[inline]
    fn from(rc: Arc<str>) -> Self {
        // SAFETY: `str` has the same layout as `[u8]`.
        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
    }
}

#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
    type Error = Arc<[T], A>;

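    /// Tries to convert an `Arc<[T], A>` into an `Arc<[T; N], A>`.
    ///
    /// Succeeds without copying when the slice length is exactly `N`; otherwise the
    /// original `Arc<[T], A>` is returned unchanged as the error value. A minimal sketch:
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    /// ```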
    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            let (ptr, alloc) = boxed_slice.internal_into_inner_with_allocator();
            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
        } else {
            Err(boxed_slice)
        }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        ToArcSlice::to_arc_slice(iter.into_iter())
    }
}

#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
    fn to_arc_slice(self) -> Arc<[T]>;
}

#[cfg(not(no_global_oom_handling))]
impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
    default fn to_arc_slice(self) -> Arc<[T]> {
        self.collect::<Vec<T>>().into()
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
    fn to_arc_slice(self) -> Arc<[T]> {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = self.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low,
                high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: We need to ensure that the iterator has an exact length,
                // which the `TrustedLen` bound and the check above guarantee.
                Arc::from_iter_exact(self, low)
            }
        } else {
            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
            // length exceeding `usize::MAX`.
            // The default implementation would collect into a vec which would panic.
            // Thus we panic here immediately without invoking `Vec` code.
            panic!("capacity overflow");
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
    fn as_ref(&self) -> &T {
        &**self
    }
}
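
// Illustrative sketch (not part of this module's API surface): because `Hash` and
// `borrow::Borrow` both delegate to the inner value, an `Arc<str>` key in a `HashSet`
// can be looked up with a plain `&str`:
//
//     use std::collections::HashSet;
//     use std::sync::Arc;
//
//     let mut set: HashSet<Arc<str>> = HashSet::new();
//     set.insert(Arc::from("eggplant"));
//     assert!(set.contains("eggplant"));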

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}

/// Get the offset within an `ArcInner` for the payload behind a pointer.
///
/// # Safety
///
/// The pointer must point to (and have valid metadata for) a previously
/// valid instance of T, but the T is allowed to be dropped.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the data field will always be the last field in memory.
    // SAFETY: since the only unsized types possible are slices, trait objects,
    // and extern types, the input safety requirement is currently enough to
    // satisfy the requirements of align_of_val_raw; this is an implementation
    // detail of the language that must not be relied upon outside of std.
    unsafe { data_offset_align(align_of_val_raw(ptr)) }
}

#[inline]
fn data_offset_align(align: usize) -> usize {
    let layout = Layout::new::<ArcInner<()>>();
    layout.size() + layout.padding_needed_for(align)
}
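
// Worked example (illustrative, assuming a typical 64-bit target): `ArcInner<()>` holds
// just the two `AtomicUsize` reference counts, so `Layout::new::<ArcInner<()>>()` has
// size 16 and alignment 8. For payloads with alignment <= 8 no padding is needed and the
// data offset is 16; for a payload with alignment 32, `padding_needed_for(32)` adds 16
// bytes, giving an offset of 32.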

#[stable(feature = "arc_error", since = "1.52.0")]
impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
    #[allow(deprecated, deprecated_in_future)]
    fn description(&self) -> &str {
        core::error::Error::description(&**self)
    }

    #[allow(deprecated)]
    fn cause(&self) -> Option<&dyn core::error::Error> {
        core::error::Error::cause(&**self)
    }

    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        core::error::Error::source(&**self)
    }

    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
        core::error::Error::provide(&**self, req);
    }
}
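
// Illustrative sketch of the impl above (hypothetical `MyError` type, not part of this
// module): an `Arc` wrapping an error type can itself be used as an error by delegation.
//
//     use std::error::Error;
//     use std::fmt;
//     use std::sync::Arc;
//
//     #[derive(Debug)]
//     struct MyError;
//     impl fmt::Display for MyError {
//         fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//             write!(f, "my error")
//         }
//     }
//     impl Error for MyError {}
//
//     let shared = Arc::new(MyError);
//     let as_dyn: &dyn Error = &shared; // `Arc<MyError>: Error` via the impl above
//     assert!(as_dyn.source().is_none());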