1#![stable(feature = "rust1", since = "1.0.0")]
2
3//! Thread-safe reference-counting pointers.
4//!
5//! See the [`Arc<T>`][Arc] documentation for more details.
6//!
7//! **Note**: This module is only available on platforms that support atomic
8//! loads and stores of pointers. This may be detected at compile time using
9//! `#[cfg(target_has_atomic = "ptr")]`.
10
11use core::any::Any;
12use core::borrow;
13use core::cmp::Ordering;
14use core::fmt;
15use core::hash::{Hash, Hasher};
16use core::hint;
17use core::intrinsics::abort;
18#[cfg(not(no_global_oom_handling))]
19use core::iter;
20use core::marker::{PhantomData, Unsize};
21#[cfg(not(no_global_oom_handling))]
22use core::mem::size_of_val;
23use core::mem::{self, align_of_val_raw};
24use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
25use core::panic::{RefUnwindSafe, UnwindSafe};
26use core::pin::Pin;
27use core::ptr::{self, NonNull};
28#[cfg(not(no_global_oom_handling))]
29use core::slice::from_raw_parts_mut;
30use core::sync::atomic;
31use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
32
33#[cfg(not(no_global_oom_handling))]
34use crate::alloc::handle_alloc_error;
35#[cfg(not(no_global_oom_handling))]
36use crate::alloc::WriteCloneIntoRaw;
37use crate::alloc::{AllocError, Allocator, Global, Layout};
38use crate::borrow::{Cow, ToOwned};
39use crate::boxed::Box;
40use crate::rc::is_dangling;
41#[cfg(not(no_global_oom_handling))]
42use crate::string::String;
43#[cfg(not(no_global_oom_handling))]
44use crate::vec::Vec;
45
46#[cfg(test)]
47mod tests;
48
49/// A soft limit on the amount of references that may be made to an `Arc`.
50///
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might cause a `panic` even if the count does not actually exceed it.
54///
55/// This is a global invariant, and also applies when using a compare-exchange loop.
56///
57/// See comment in `Arc::clone`.
58const MAX_REFCOUNT: usize = (isize::MAX) as usize;
59
60/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
61const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
62
63#[cfg(not(sanitize = "thread"))]
64macro_rules! acquire {
65 ($x:expr) => {
66 atomic::fence(Acquire)
67 };
68}
69
70// ThreadSanitizer does not support memory fences. To avoid false positive
71// reports in Arc / Weak implementation use atomic loads for synchronization
72// instead.
73#[cfg(sanitize = "thread")]
74macro_rules! acquire {
75 ($x:expr) => {
76 $x.load(Acquire)
77 };
78}
79
80/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
81/// Reference Counted'.
82///
83/// The type `Arc<T>` provides shared ownership of a value of type `T`,
84/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
85/// a new `Arc` instance, which points to the same allocation on the heap as the
86/// source `Arc`, while increasing a reference count. When the last `Arc`
87/// pointer to a given allocation is destroyed, the value stored in that allocation (often
88/// referred to as "inner value") is also dropped.
89///
90/// Shared references in Rust disallow mutation by default, and `Arc` is no
91/// exception: you cannot generally obtain a mutable reference to something
92/// inside an `Arc`. If you need to mutate through an `Arc`, use
93/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
94/// types.
95///
96/// **Note**: This type is only available on platforms that support atomic
97/// loads and stores of pointers, which includes all platforms that support
98/// the `std` crate but not all those which only support [`alloc`](crate).
99/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
100///
101/// ## Thread Safety
102///
103/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
104/// counting. This means that it is thread-safe. The disadvantage is that
105/// atomic operations are more expensive than ordinary memory accesses. If you
106/// are not sharing reference-counted allocations between threads, consider using
107/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
108/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
109/// However, a library might choose `Arc<T>` in order to give library consumers
110/// more flexibility.
111///
112/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
113/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
114/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
115/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
116/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
117/// data, but it doesn't add thread safety to its data. Consider
118/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
119/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
120/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
121/// non-atomic operations.
122///
123/// In the end, this means that you may need to pair `Arc<T>` with some sort of
124/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
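///
/// For example, a counter shared between threads can be wrapped in a
/// [`Mutex`][mutex] inside an `Arc`. This is a minimal sketch; the number of
/// threads and the values used are arbitrary:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let counter = Arc::new(Mutex::new(0));
/// let mut handles = Vec::new();
///
/// for _ in 0..4 {
///     let counter = Arc::clone(&counter);
///     handles.push(thread::spawn(move || {
///         // Lock the mutex to get exclusive access to the shared value.
///         *counter.lock().unwrap() += 1;
///     }));
/// }
///
/// for handle in handles {
///     handle.join().unwrap();
/// }
///
/// assert_eq!(*counter.lock().unwrap(), 4);
/// ```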
125///
126/// ## Breaking cycles with `Weak`
127///
128/// The [`downgrade`][downgrade] method can be used to create a non-owning
129/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
130/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
131/// already been dropped. In other words, `Weak` pointers do not keep the value
132/// inside the allocation alive; however, they *do* keep the allocation
133/// (the backing store for the value) alive.
134///
135/// A cycle between `Arc` pointers will never be deallocated. For this reason,
136/// [`Weak`] is used to break cycles. For example, a tree could have
137/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
138/// pointers from children back to their parents.
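///
/// The sketch below shows one way to model such a tree; the `Node` type and
/// its field names are illustrative, not part of any API:
///
/// ```
/// use std::sync::{Arc, Mutex, Weak};
///
/// struct Node {
///     parent: Mutex<Weak<Node>>,
///     children: Mutex<Vec<Arc<Node>>>,
/// }
///
/// let parent = Arc::new(Node {
///     parent: Mutex::new(Weak::new()),
///     children: Mutex::new(Vec::new()),
/// });
/// let child = Arc::new(Node {
///     // The child refers back to its parent through a `Weak`, so no cycle
///     // of strong references is created.
///     parent: Mutex::new(Arc::downgrade(&parent)),
///     children: Mutex::new(Vec::new()),
/// });
/// parent.children.lock().unwrap().push(Arc::clone(&child));
///
/// // The parent can still be reached from the child, but only weakly.
/// assert!(child.parent.lock().unwrap().upgrade().is_some());
/// ```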
139///
140/// # Cloning references
141///
142/// Creating a new reference from an existing reference-counted pointer is done using the
143/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
144///
145/// ```
146/// use std::sync::Arc;
147/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
148/// // The two syntaxes below are equivalent.
149/// let a = foo.clone();
150/// let b = Arc::clone(&foo);
151/// // a, b, and foo are all Arcs that point to the same memory location
152/// ```
153///
154/// ## `Deref` behavior
155///
156/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
157/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
158/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
159/// functions, called using [fully qualified syntax]:
160///
161/// ```
162/// use std::sync::Arc;
163///
164/// let my_arc = Arc::new(());
165/// let my_weak = Arc::downgrade(&my_arc);
166/// ```
167///
168/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
169/// fully qualified syntax. Some people prefer to use fully qualified syntax,
170/// while others prefer using method-call syntax.
171///
172/// ```
173/// use std::sync::Arc;
174///
175/// let arc = Arc::new(());
176/// // Method-call syntax
177/// let arc2 = arc.clone();
178/// // Fully qualified syntax
179/// let arc3 = Arc::clone(&arc);
180/// ```
181///
182/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
183/// already been dropped.
184///
185/// [`Rc<T>`]: crate::rc::Rc
186/// [clone]: Clone::clone
187/// [mutex]: ../../std/sync/struct.Mutex.html
188/// [rwlock]: ../../std/sync/struct.RwLock.html
189/// [atomic]: core::sync::atomic
190/// [downgrade]: Arc::downgrade
191/// [upgrade]: Weak::upgrade
192/// [RefCell\<T>]: core::cell::RefCell
193/// [`RefCell<T>`]: core::cell::RefCell
194/// [`std::sync`]: ../../std/sync/index.html
195/// [`Arc::clone(&from)`]: Arc::clone
196/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
197///
198/// # Examples
199///
200/// Sharing some immutable data between threads:
201///
202// Note that we **do not** run these tests here. The windows builders get super
203// unhappy if a thread outlives the main thread and then exits at the same time
204// (something deadlocks) so we just avoid this entirely by not running these
205// tests.
206/// ```no_run
207/// use std::sync::Arc;
208/// use std::thread;
209///
210/// let five = Arc::new(5);
211///
/// for _ in 0..10 {
///     let five = Arc::clone(&five);
///
///     thread::spawn(move || {
///         println!("{five:?}");
///     });
/// }
219/// ```
220///
221/// Sharing a mutable [`AtomicUsize`]:
222///
223/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
224///
225/// ```no_run
226/// use std::sync::Arc;
227/// use std::sync::atomic::{AtomicUsize, Ordering};
228/// use std::thread;
229///
230/// let val = Arc::new(AtomicUsize::new(5));
231///
/// for _ in 0..10 {
///     let val = Arc::clone(&val);
///
///     thread::spawn(move || {
///         let v = val.fetch_add(1, Ordering::SeqCst);
///         println!("{v:?}");
///     });
/// }
240/// ```
241///
242/// See the [`rc` documentation][rc_examples] for more examples of reference
243/// counting in general.
244///
245/// [rc_examples]: crate::rc#examples
246#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
247#[stable(feature = "rust1", since = "1.0.0")]
248pub struct Arc<
249 T: ?Sized,
250 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
251> {
252 ptr: NonNull<ArcInner<T>>,
253 phantom: PhantomData<ArcInner<T>>,
254 alloc: A,
255}
256
257#[stable(feature = "rust1", since = "1.0.0")]
258unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Arc<T, A> {}
259#[stable(feature = "rust1", since = "1.0.0")]
260unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Arc<T, A> {}
261
262#[stable(feature = "catch_unwind", since = "1.9.0")]
263impl<T: RefUnwindSafe + ?Sized, A: Allocator + UnwindSafe> UnwindSafe for Arc<T, A> {}
264
265#[unstable(feature = "coerce_unsized", issue = "18598")]
266impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Arc<U, A>> for Arc<T, A> {}
267
268#[unstable(feature = "dispatch_from_dyn", issue = "none")]
269impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
270
271impl<T: ?Sized> Arc<T> {
    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
        unsafe { Self::from_inner_in(ptr, Global) }
    }

    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
        unsafe { Self::from_ptr_in(ptr, Global) }
    }
279}
280
281impl<T: ?Sized, A: Allocator> Arc<T, A> {
282 #[inline]
    fn internal_into_inner_with_allocator(self) -> (NonNull<ArcInner<T>>, A) {
        // Wrap `self` in `ManuallyDrop` so its destructor does not run, then
        // move the pointer and allocator out of it.
        let this = mem::ManuallyDrop::new(self);
        (this.ptr, unsafe { ptr::read(&this.alloc) })
286 }
287
288 #[inline]
289 unsafe fn from_inner_in(ptr: NonNull<ArcInner<T>>, alloc: A) -> Self {
290 Self { ptr, phantom: PhantomData, alloc }
291 }
292
293 #[inline]
    unsafe fn from_ptr_in(ptr: *mut ArcInner<T>, alloc: A) -> Self {
        unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) }
    }
297}
298
299/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
300/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
301/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
302///
303/// Since a `Weak` reference does not count towards ownership, it will not
304/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
305/// guarantees about the value still being present. Thus it may return [`None`]
306/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
307/// itself (the backing store) from being deallocated.
308///
309/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
310/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
311/// prevent circular references between [`Arc`] pointers, since mutual owning references
312/// would never allow either [`Arc`] to be dropped. For example, a tree could
313/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
314/// pointers from children back to their parents.
315///
316/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
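///
/// A brief sketch of the round trip (the stored value is arbitrary):
///
/// ```
/// use std::sync::Arc;
///
/// let strong = Arc::new(5);
/// let weak = Arc::downgrade(&strong);
///
/// // While a strong pointer exists, the `Weak` can be upgraded.
/// assert_eq!(weak.upgrade().as_deref(), Some(&5));
///
/// drop(strong);
/// // Once the last `Arc` is gone, upgrading fails.
/// assert!(weak.upgrade().is_none());
/// ```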
317///
318/// [`upgrade`]: Weak::upgrade
319#[stable(feature = "arc_weak", since = "1.4.0")]
320#[cfg_attr(not(test), rustc_diagnostic_item = "ArcWeak")]
321pub struct Weak<
322 T: ?Sized,
323 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
324> {
325 // This is a `NonNull` to allow optimizing the size of this type in enums,
326 // but it is not necessarily a valid pointer.
327 // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
328 // to allocate space on the heap. That's not a value a real pointer
329 // will ever have because RcBox has alignment at least 2.
330 // This is only possible when `T: Sized`; unsized `T` never dangle.
331 ptr: NonNull<ArcInner<T>>,
332 alloc: A,
333}
334
335#[stable(feature = "arc_weak", since = "1.4.0")]
336unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Send> Send for Weak<T, A> {}
337#[stable(feature = "arc_weak", since = "1.4.0")]
338unsafe impl<T: ?Sized + Sync + Send, A: Allocator + Sync> Sync for Weak<T, A> {}
339
340#[unstable(feature = "coerce_unsized", issue = "18598")]
341impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Weak<U, A>> for Weak<T, A> {}
342#[unstable(feature = "dispatch_from_dyn", issue = "none")]
343impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
344
345#[stable(feature = "arc_weak", since = "1.4.0")]
346impl<T: ?Sized> fmt::Debug for Weak<T> {
347 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
348 write!(f, "(Weak)")
349 }
350}
351
352// This is repr(C) to future-proof against possible field-reordering, which
353// would interfere with otherwise safe [into|from]_raw() of transmutable
354// inner types.
355#[repr(C)]
356struct ArcInner<T: ?Sized> {
357 strong: atomic::AtomicUsize,
358
359 // the value usize::MAX acts as a sentinel for temporarily "locking" the
360 // ability to upgrade weak pointers or downgrade strong ones; this is used
361 // to avoid races in `make_mut` and `get_mut`.
362 weak: atomic::AtomicUsize,
363
364 data: T,
365}
366
367/// Calculate layout for `ArcInner<T>` using the inner value's layout
368fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
369 // Calculate layout using the given value layout.
370 // Previously, layout was calculated on the expression
371 // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
372 // reference (see #54908).
    Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
374}
375
376unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
377unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
378
379impl<T> Arc<T> {
380 /// Constructs a new `Arc<T>`.
381 ///
382 /// # Examples
383 ///
384 /// ```
385 /// use std::sync::Arc;
386 ///
387 /// let five = Arc::new(5);
388 /// ```
389 #[cfg(not(no_global_oom_handling))]
390 #[inline]
391 #[stable(feature = "rust1", since = "1.0.0")]
392 pub fn new(data: T) -> Arc<T> {
393 // Start the weak pointer count as 1 which is the weak pointer that's
394 // held by all the strong pointers (kinda), see std/rc.rs for more info
395 let x: Box<_> = Box::new(ArcInner {
396 strong: atomic::AtomicUsize::new(1),
397 weak: atomic::AtomicUsize::new(1),
398 data,
399 });
400 unsafe { Self::from_inner(Box::leak(x).into()) }
401 }
402
403 /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
404 /// to allow you to construct a `T` which holds a weak pointer to itself.
405 ///
406 /// Generally, a structure circularly referencing itself, either directly or
407 /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
408 /// Using this function, you get access to the weak pointer during the
409 /// initialization of `T`, before the `Arc<T>` is created, such that you can
410 /// clone and store it inside the `T`.
411 ///
412 /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
413 /// then calls your closure, giving it a `Weak<T>` to this allocation,
414 /// and only afterwards completes the construction of the `Arc<T>` by placing
415 /// the `T` returned from your closure into the allocation.
416 ///
417 /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
418 /// returns, calling [`upgrade`] on the weak reference inside your closure will
419 /// fail and result in a `None` value.
420 ///
421 /// # Panics
422 ///
423 /// If `data_fn` panics, the panic is propagated to the caller, and the
424 /// temporary [`Weak<T>`] is dropped normally.
425 ///
426 /// # Example
427 ///
428 /// ```
429 /// # #![allow(dead_code)]
430 /// use std::sync::{Arc, Weak};
431 ///
432 /// struct Gadget {
///     me: Weak<Gadget>,
/// }
///
/// impl Gadget {
///     /// Construct a reference counted Gadget.
///     fn new() -> Arc<Self> {
///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
///         // `Arc` we're constructing.
///         Arc::new_cyclic(|me| {
///             // Create the actual struct here.
///             Gadget { me: me.clone() }
///         })
///     }
///
///     /// Return a reference counted pointer to Self.
///     fn me(&self) -> Arc<Self> {
///         self.me.upgrade().unwrap()
///     }
451 /// }
452 /// ```
453 /// [`upgrade`]: Weak::upgrade
454 #[cfg(not(no_global_oom_handling))]
455 #[inline]
456 #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
457 pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
458 where
459 F: FnOnce(&Weak<T>) -> T,
460 {
461 // Construct the inner in the "uninitialized" state with a single
462 // weak reference.
463 let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
464 strong: atomic::AtomicUsize::new(0),
465 weak: atomic::AtomicUsize::new(1),
466 data: mem::MaybeUninit::<T>::uninit(),
467 }))
468 .into();
469 let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
470
471 let weak = Weak { ptr: init_ptr, alloc: Global };
472
473 // It's important we don't give up ownership of the weak pointer, or
474 // else the memory might be freed by the time `data_fn` returns. If
475 // we really wanted to pass ownership, we could create an additional
476 // weak pointer for ourselves, but this would result in additional
477 // updates to the weak reference count which might not be necessary
478 // otherwise.
479 let data = data_fn(&weak);
480
481 // Now we can properly initialize the inner value and turn our weak
482 // reference into a strong reference.
483 let strong = unsafe {
484 let inner = init_ptr.as_ptr();
485 ptr::write(ptr::addr_of_mut!((*inner).data), data);
486
487 // The above write to the data field must be visible to any threads which
488 // observe a non-zero strong count. Therefore we need at least "Release" ordering
489 // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
490 //
491 // "Acquire" ordering is not required. When considering the possible behaviours
492 // of `data_fn` we only need to look at what it could do with a reference to a
493 // non-upgradeable `Weak`:
494 // - It can *clone* the `Weak`, increasing the weak reference count.
495 // - It can drop those clones, decreasing the weak reference count (but never to zero).
496 //
497 // These side effects do not impact us in any way, and no other side effects are
498 // possible with safe code alone.
499 let prev_value = (*inner).strong.fetch_add(1, Release);
500 debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
501
502 Arc::from_inner(init_ptr)
503 };
504
505 // Strong references should collectively own a shared weak reference,
506 // so don't run the destructor for our old weak reference.
507 mem::forget(weak);
508 strong
509 }
510
511 /// Constructs a new `Arc` with uninitialized contents.
512 ///
513 /// # Examples
514 ///
515 /// ```
516 /// #![feature(new_uninit)]
517 /// #![feature(get_mut_unchecked)]
518 ///
519 /// use std::sync::Arc;
520 ///
521 /// let mut five = Arc::<u32>::new_uninit();
522 ///
523 /// // Deferred initialization:
524 /// Arc::get_mut(&mut five).unwrap().write(5);
525 ///
526 /// let five = unsafe { five.assume_init() };
527 ///
528 /// assert_eq!(*five, 5)
529 /// ```
530 #[cfg(not(no_global_oom_handling))]
531 #[inline]
532 #[unstable(feature = "new_uninit", issue = "63291")]
533 #[must_use]
534 pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
535 unsafe {
536 Arc::from_ptr(Arc::allocate_for_layout(
537 Layout::new::<T>(),
538 |layout| Global.allocate(layout),
539 <*mut u8>::cast,
540 ))
541 }
542 }
543
544 /// Constructs a new `Arc` with uninitialized contents, with the memory
545 /// being filled with `0` bytes.
546 ///
547 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
548 /// of this method.
549 ///
550 /// # Examples
551 ///
552 /// ```
553 /// #![feature(new_uninit)]
554 ///
555 /// use std::sync::Arc;
556 ///
557 /// let zero = Arc::<u32>::new_zeroed();
558 /// let zero = unsafe { zero.assume_init() };
559 ///
560 /// assert_eq!(*zero, 0)
561 /// ```
562 ///
563 /// [zeroed]: mem::MaybeUninit::zeroed
564 #[cfg(not(no_global_oom_handling))]
565 #[inline]
566 #[unstable(feature = "new_uninit", issue = "63291")]
567 #[must_use]
568 pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
569 unsafe {
570 Arc::from_ptr(Arc::allocate_for_layout(
571 Layout::new::<T>(),
572 |layout| Global.allocate_zeroed(layout),
573 <*mut u8>::cast,
574 ))
575 }
576 }
577
578 /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
579 /// `data` will be pinned in memory and unable to be moved.
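///
/// # Examples
///
/// A minimal illustration (the pinned value here is arbitrary):
///
/// ```
/// use std::sync::Arc;
///
/// let pinned = Arc::pin(5);
/// // `Pin<Arc<T>>` still dereferences to `T`.
/// assert_eq!(*pinned, 5);
/// ```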
580 #[cfg(not(no_global_oom_handling))]
581 #[stable(feature = "pin", since = "1.33.0")]
582 #[must_use]
583 pub fn pin(data: T) -> Pin<Arc<T>> {
584 unsafe { Pin::new_unchecked(Arc::new(data)) }
585 }
586
/// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
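///
/// # Examples
///
/// A minimal illustration, assuming the unstable `allocator_api` feature
/// (which gates this method):
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::sync::Arc;
///
/// let pinned = Arc::try_pin(5)?;
/// assert_eq!(*pinned, 5);
/// # Ok::<(), std::alloc::AllocError>(())
/// ```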
588 #[unstable(feature = "allocator_api", issue = "32838")]
589 #[inline]
590 pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
591 unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
592 }
593
594 /// Constructs a new `Arc<T>`, returning an error if allocation fails.
595 ///
596 /// # Examples
597 ///
598 /// ```
599 /// #![feature(allocator_api)]
600 /// use std::sync::Arc;
601 ///
602 /// let five = Arc::try_new(5)?;
603 /// # Ok::<(), std::alloc::AllocError>(())
604 /// ```
605 #[unstable(feature = "allocator_api", issue = "32838")]
606 #[inline]
607 pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
608 // Start the weak pointer count as 1 which is the weak pointer that's
609 // held by all the strong pointers (kinda), see std/rc.rs for more info
610 let x: Box<_> = Box::try_new(ArcInner {
611 strong: atomic::AtomicUsize::new(1),
612 weak: atomic::AtomicUsize::new(1),
613 data,
614 })?;
615 unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
616 }
617
618 /// Constructs a new `Arc` with uninitialized contents, returning an error
619 /// if allocation fails.
620 ///
621 /// # Examples
622 ///
623 /// ```
624 /// #![feature(new_uninit, allocator_api)]
625 /// #![feature(get_mut_unchecked)]
626 ///
627 /// use std::sync::Arc;
628 ///
629 /// let mut five = Arc::<u32>::try_new_uninit()?;
630 ///
631 /// // Deferred initialization:
632 /// Arc::get_mut(&mut five).unwrap().write(5);
633 ///
634 /// let five = unsafe { five.assume_init() };
635 ///
636 /// assert_eq!(*five, 5);
637 /// # Ok::<(), std::alloc::AllocError>(())
638 /// ```
639 #[unstable(feature = "allocator_api", issue = "32838")]
640 // #[unstable(feature = "new_uninit", issue = "63291")]
641 pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
642 unsafe {
643 Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
644 Layout::new::<T>(),
645 |layout| Global.allocate(layout),
646 <*mut u8>::cast,
647 )?))
648 }
649 }
650
651 /// Constructs a new `Arc` with uninitialized contents, with the memory
652 /// being filled with `0` bytes, returning an error if allocation fails.
653 ///
654 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
655 /// of this method.
656 ///
657 /// # Examples
658 ///
659 /// ```
660 /// #![feature(new_uninit, allocator_api)]
661 ///
662 /// use std::sync::Arc;
663 ///
664 /// let zero = Arc::<u32>::try_new_zeroed()?;
665 /// let zero = unsafe { zero.assume_init() };
666 ///
667 /// assert_eq!(*zero, 0);
668 /// # Ok::<(), std::alloc::AllocError>(())
669 /// ```
670 ///
671 /// [zeroed]: mem::MaybeUninit::zeroed
672 #[unstable(feature = "allocator_api", issue = "32838")]
673 // #[unstable(feature = "new_uninit", issue = "63291")]
674 pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
675 unsafe {
676 Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
677 Layout::new::<T>(),
678 |layout| Global.allocate_zeroed(layout),
679 <*mut u8>::cast,
680 )?))
681 }
682 }
683}
684
685impl<T, A: Allocator> Arc<T, A> {
686 /// Returns a reference to the underlying allocator.
687 ///
688 /// Note: this is an associated function, which means that you have
689 /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This
690 /// is so that there is no conflict with a method on the inner type.
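///
/// # Examples
///
/// A minimal illustration, assuming the unstable `allocator_api` feature
/// (which gates this method):
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
/// use std::sync::Arc;
///
/// let five = Arc::new_in(5, System);
/// let _allocator: &System = Arc::allocator(&five);
/// ```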
691 #[inline]
692 #[unstable(feature = "allocator_api", issue = "32838")]
693 pub fn allocator(this: &Self) -> &A {
694 &this.alloc
695 }
696 /// Constructs a new `Arc<T>` in the provided allocator.
697 ///
698 /// # Examples
699 ///
700 /// ```
701 /// #![feature(allocator_api)]
702 ///
703 /// use std::sync::Arc;
704 /// use std::alloc::System;
705 ///
706 /// let five = Arc::new_in(5, System);
707 /// ```
708 #[inline]
709 #[cfg(not(no_global_oom_handling))]
710 #[unstable(feature = "allocator_api", issue = "32838")]
711 pub fn new_in(data: T, alloc: A) -> Arc<T, A> {
712 // Start the weak pointer count as 1 which is the weak pointer that's
713 // held by all the strong pointers (kinda), see std/rc.rs for more info
714 let x = Box::new_in(
715 ArcInner {
716 strong: atomic::AtomicUsize::new(1),
717 weak: atomic::AtomicUsize::new(1),
718 data,
719 },
720 alloc,
721 );
722 let (ptr, alloc) = Box::into_unique(x);
723 unsafe { Self::from_inner_in(ptr.into(), alloc) }
724 }
725
726 /// Constructs a new `Arc` with uninitialized contents in the provided allocator.
727 ///
728 /// # Examples
729 ///
730 /// ```
731 /// #![feature(new_uninit)]
732 /// #![feature(get_mut_unchecked)]
733 /// #![feature(allocator_api)]
734 ///
735 /// use std::sync::Arc;
736 /// use std::alloc::System;
737 ///
738 /// let mut five = Arc::<u32, _>::new_uninit_in(System);
739 ///
/// let five = unsafe {
///     // Deferred initialization:
///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
///
///     five.assume_init()
/// };
746 ///
747 /// assert_eq!(*five, 5)
748 /// ```
749 #[cfg(not(no_global_oom_handling))]
750 #[unstable(feature = "allocator_api", issue = "32838")]
751 // #[unstable(feature = "new_uninit", issue = "63291")]
752 #[inline]
753 pub fn new_uninit_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
754 unsafe {
755 Arc::from_ptr_in(
756 Arc::allocate_for_layout(
757 Layout::new::<T>(),
758 |layout| alloc.allocate(layout),
759 <*mut u8>::cast,
760 ),
761 alloc,
762 )
763 }
764 }
765
766 /// Constructs a new `Arc` with uninitialized contents, with the memory
767 /// being filled with `0` bytes, in the provided allocator.
768 ///
769 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
770 /// of this method.
771 ///
772 /// # Examples
773 ///
774 /// ```
775 /// #![feature(new_uninit)]
776 /// #![feature(allocator_api)]
777 ///
778 /// use std::sync::Arc;
779 /// use std::alloc::System;
780 ///
781 /// let zero = Arc::<u32, _>::new_zeroed_in(System);
782 /// let zero = unsafe { zero.assume_init() };
783 ///
784 /// assert_eq!(*zero, 0)
785 /// ```
786 ///
787 /// [zeroed]: mem::MaybeUninit::zeroed
788 #[cfg(not(no_global_oom_handling))]
789 #[unstable(feature = "allocator_api", issue = "32838")]
790 // #[unstable(feature = "new_uninit", issue = "63291")]
791 #[inline]
792 pub fn new_zeroed_in(alloc: A) -> Arc<mem::MaybeUninit<T>, A> {
793 unsafe {
794 Arc::from_ptr_in(
795 Arc::allocate_for_layout(
796 Layout::new::<T>(),
797 |layout| alloc.allocate_zeroed(layout),
798 <*mut u8>::cast,
799 ),
800 alloc,
801 )
802 }
803 }
804
805 /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
806 /// then `data` will be pinned in memory and unable to be moved.
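///
/// # Examples
///
/// A minimal illustration, assuming the unstable `allocator_api` feature
/// (which gates this method):
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
/// use std::sync::Arc;
///
/// let pinned = Arc::pin_in(5, System);
/// assert_eq!(*pinned, 5);
/// ```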
807 #[cfg(not(no_global_oom_handling))]
808 #[unstable(feature = "allocator_api", issue = "32838")]
809 #[inline]
810 pub fn pin_in(data: T, alloc: A) -> Pin<Arc<T, A>> {
811 unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) }
812 }
813
/// Constructs a new `Pin<Arc<T, A>>` in the provided allocator, returning an error if allocation
815 /// fails.
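///
/// # Examples
///
/// A minimal illustration, assuming the unstable `allocator_api` feature
/// (which gates this method):
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
/// use std::sync::Arc;
///
/// let pinned = Arc::try_pin_in(5, System)?;
/// assert_eq!(*pinned, 5);
/// # Ok::<(), std::alloc::AllocError>(())
/// ```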
816 #[inline]
817 #[unstable(feature = "allocator_api", issue = "32838")]
818 pub fn try_pin_in(data: T, alloc: A) -> Result<Pin<Arc<T, A>>, AllocError> {
819 unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) }
820 }
821
822 /// Constructs a new `Arc<T, A>` in the provided allocator, returning an error if allocation fails.
823 ///
824 /// # Examples
825 ///
826 /// ```
827 /// #![feature(allocator_api)]
828 ///
829 /// use std::sync::Arc;
830 /// use std::alloc::System;
831 ///
832 /// let five = Arc::try_new_in(5, System)?;
833 /// # Ok::<(), std::alloc::AllocError>(())
834 /// ```
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
838 pub fn try_new_in(data: T, alloc: A) -> Result<Arc<T, A>, AllocError> {
839 // Start the weak pointer count as 1 which is the weak pointer that's
840 // held by all the strong pointers (kinda), see std/rc.rs for more info
841 let x = Box::try_new_in(
842 ArcInner {
843 strong: atomic::AtomicUsize::new(1),
844 weak: atomic::AtomicUsize::new(1),
845 data,
846 },
847 alloc,
848 )?;
849 let (ptr, alloc) = Box::into_unique(x);
850 Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) })
851 }
852
853 /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an
854 /// error if allocation fails.
855 ///
856 /// # Examples
857 ///
858 /// ```
859 /// #![feature(new_uninit, allocator_api)]
860 /// #![feature(get_mut_unchecked)]
861 ///
862 /// use std::sync::Arc;
863 /// use std::alloc::System;
864 ///
865 /// let mut five = Arc::<u32, _>::try_new_uninit_in(System)?;
866 ///
/// let five = unsafe {
///     // Deferred initialization:
///     Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5);
///
///     five.assume_init()
/// };
873 ///
874 /// assert_eq!(*five, 5);
875 /// # Ok::<(), std::alloc::AllocError>(())
876 /// ```
877 #[unstable(feature = "allocator_api", issue = "32838")]
878 // #[unstable(feature = "new_uninit", issue = "63291")]
879 #[inline]
880 pub fn try_new_uninit_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
881 unsafe {
882 Ok(Arc::from_ptr_in(
883 Arc::try_allocate_for_layout(
884 Layout::new::<T>(),
885 |layout| alloc.allocate(layout),
886 <*mut u8>::cast,
887 )?,
888 alloc,
889 ))
890 }
891 }
892
893 /// Constructs a new `Arc` with uninitialized contents, with the memory
894 /// being filled with `0` bytes, in the provided allocator, returning an error if allocation
895 /// fails.
896 ///
897 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
898 /// of this method.
899 ///
900 /// # Examples
901 ///
902 /// ```
903 /// #![feature(new_uninit, allocator_api)]
904 ///
905 /// use std::sync::Arc;
906 /// use std::alloc::System;
907 ///
908 /// let zero = Arc::<u32, _>::try_new_zeroed_in(System)?;
909 /// let zero = unsafe { zero.assume_init() };
910 ///
911 /// assert_eq!(*zero, 0);
912 /// # Ok::<(), std::alloc::AllocError>(())
913 /// ```
914 ///
915 /// [zeroed]: mem::MaybeUninit::zeroed
916 #[unstable(feature = "allocator_api", issue = "32838")]
917 // #[unstable(feature = "new_uninit", issue = "63291")]
918 #[inline]
919 pub fn try_new_zeroed_in(alloc: A) -> Result<Arc<mem::MaybeUninit<T>, A>, AllocError> {
920 unsafe {
921 Ok(Arc::from_ptr_in(
922 Arc::try_allocate_for_layout(
923 Layout::new::<T>(),
924 |layout| alloc.allocate_zeroed(layout),
925 <*mut u8>::cast,
926 )?,
927 alloc,
928 ))
929 }
930 }
931 /// Returns the inner value, if the `Arc` has exactly one strong reference.
932 ///
933 /// Otherwise, an [`Err`] is returned with the same `Arc` that was
934 /// passed in.
935 ///
936 /// This will succeed even if there are outstanding weak references.
937 ///
938 /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
939 /// want to keep the `Arc` in the [`Err`] case.
940 /// Immediately dropping the [`Err`] payload, like in the expression
941 /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
942 /// drop to zero and the inner value of the `Arc` to be dropped:
943 /// For instance if two threads each execute this expression in parallel, then
944 /// there is a race condition. The threads could first both check whether they
945 /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
946 /// both drop their `Arc` in the call to [`ok`][`Result::ok`],
947 /// taking the strong count from two down to zero.
948 ///
949 /// # Examples
950 ///
951 /// ```
952 /// use std::sync::Arc;
953 ///
954 /// let x = Arc::new(3);
955 /// assert_eq!(Arc::try_unwrap(x), Ok(3));
956 ///
957 /// let x = Arc::new(4);
958 /// let _y = Arc::clone(&x);
959 /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
960 /// ```
961 #[inline]
962 #[stable(feature = "arc_unique", since = "1.4.0")]
963 pub fn try_unwrap(this: Self) -> Result<T, Self> {
964 if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
965 return Err(this);
966 }
967
968 acquire!(this.inner().strong);
969
970 unsafe {
971 let elem = ptr::read(&this.ptr.as_ref().data);
972 let alloc = ptr::read(&this.alloc); // copy the allocator
973
974 // Make a weak pointer to clean up the implicit strong-weak reference
975 let _weak = Weak { ptr: this.ptr, alloc };
976 mem::forget(this);
977
978 Ok(elem)
979 }
980 }
981
982 /// Returns the inner value, if the `Arc` has exactly one strong reference.
983 ///
984 /// Otherwise, [`None`] is returned and the `Arc` is dropped.
985 ///
986 /// This will succeed even if there are outstanding weak references.
987 ///
988 /// If `Arc::into_inner` is called on every clone of this `Arc`,
989 /// it is guaranteed that exactly one of the calls returns the inner value.
990 /// This means in particular that the inner value is not dropped.
991 ///
992 /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
993 /// is meant for different use-cases. If used as a direct replacement
994 /// for `Arc::into_inner` anyway, such as with the expression
995 /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
996 /// **not** give the same guarantee as described in the previous paragraph.
997 /// For more information, see the examples below and read the documentation
998 /// of [`Arc::try_unwrap`].
999 ///
1000 /// # Examples
1001 ///
1002 /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
1003 /// ```
1004 /// use std::sync::Arc;
1005 ///
1006 /// let x = Arc::new(3);
1007 /// let y = Arc::clone(&x);
1008 ///
1009 /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
1010 /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
1011 /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
1012 ///
1013 /// let x_inner_value = x_thread.join().unwrap();
1014 /// let y_inner_value = y_thread.join().unwrap();
1015 ///
1016 /// // One of the threads is guaranteed to receive the inner value:
1017 /// assert!(matches!(
1018 /// (x_inner_value, y_inner_value),
1019 /// (None, Some(3)) | (Some(3), None)
1020 /// ));
1021 /// // The result could also be `(None, None)` if the threads called
1022 /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
1023 /// ```
1024 ///
1025 /// A more practical example demonstrating the need for `Arc::into_inner`:
1026 /// ```
1027 /// use std::sync::Arc;
1028 ///
1029 /// // Definition of a simple singly linked list using `Arc`:
1030 /// #[derive(Clone)]
1031 /// struct LinkedList<T>(Option<Arc<Node<T>>>);
1032 /// struct Node<T>(T, Option<Arc<Node<T>>>);
1033 ///
1034 /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
1035 /// // can cause a stack overflow. To prevent this, we can provide a
1036 /// // manual `Drop` implementation that does the destruction in a loop:
/// impl<T> Drop for LinkedList<T> {
///     fn drop(&mut self) {
///         let mut link = self.0.take();
///         while let Some(arc_node) = link.take() {
///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
///                 link = next;
///             }
///         }
///     }
/// }
1047 ///
1048 /// // Implementation of `new` and `push` omitted
/// impl<T> LinkedList<T> {
///     /* ... */
///     # fn new() -> Self {
///     #     LinkedList(None)
///     # }
///     # fn push(&mut self, x: T) {
///     #     self.0 = Some(Arc::new(Node(x, self.0.take())));
///     # }
/// }
1058 ///
1059 /// // The following code could have still caused a stack overflow
1060 /// // despite the manual `Drop` impl if that `Drop` impl had used
1061 /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
1062 ///
1063 /// // Create a long list and clone it
1064 /// let mut x = LinkedList::new();
1065 /// for i in 0..100000 {
1066 /// x.push(i); // Adds i to the front of x
1067 /// }
1068 /// let y = x.clone();
1069 ///
1070 /// // Drop the clones in parallel
1071 /// let x_thread = std::thread::spawn(|| drop(x));
1072 /// let y_thread = std::thread::spawn(|| drop(y));
1073 /// x_thread.join().unwrap();
1074 /// y_thread.join().unwrap();
1075 /// ```
1076 #[inline]
1077 #[stable(feature = "arc_into_inner", since = "1.70.0")]
1078 pub fn into_inner(this: Self) -> Option<T> {
1079 // Make sure that the ordinary `Drop` implementation isn’t called as well
1080 let mut this = mem::ManuallyDrop::new(this);
1081
1082 // Following the implementation of `drop` and `drop_slow`
1083 if this.inner().strong.fetch_sub(1, Release) != 1 {
1084 return None;
1085 }
1086
1087 acquire!(this.inner().strong);
1088
1089 // SAFETY: This mirrors the line
1090 //
1091 // unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
1092 //
1093 // in `drop_slow`. Instead of dropping the value behind the pointer,
1094 // it is read and eventually returned; `ptr::read` has the same
1095 // safety conditions as `ptr::drop_in_place`.
1096
1097 let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
1098 let alloc = unsafe { ptr::read(&this.alloc) };
1099
1100 drop(Weak { ptr: this.ptr, alloc });
1101
1102 Some(inner)
1103 }
1104}
1105
1106impl<T> Arc<[T]> {
1107 /// Constructs a new atomically reference-counted slice with uninitialized contents.
1108 ///
1109 /// # Examples
1110 ///
1111 /// ```
1112 /// #![feature(new_uninit)]
1113 /// #![feature(get_mut_unchecked)]
1114 ///
1115 /// use std::sync::Arc;
1116 ///
1117 /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1118 ///
1119 /// // Deferred initialization:
1120 /// let data = Arc::get_mut(&mut values).unwrap();
1121 /// data[0].write(1);
1122 /// data[1].write(2);
1123 /// data[2].write(3);
1124 ///
1125 /// let values = unsafe { values.assume_init() };
1126 ///
1127 /// assert_eq!(*values, [1, 2, 3])
1128 /// ```
1129 #[cfg(not(no_global_oom_handling))]
1130 #[inline]
1131 #[unstable(feature = "new_uninit", issue = "63291")]
1132 #[must_use]
1133 pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1134 unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
1135 }
1136
1137 /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1138 /// filled with `0` bytes.
1139 ///
1140 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1141 /// incorrect usage of this method.
1142 ///
1143 /// # Examples
1144 ///
1145 /// ```
1146 /// #![feature(new_uninit)]
1147 ///
1148 /// use std::sync::Arc;
1149 ///
1150 /// let values = Arc::<[u32]>::new_zeroed_slice(3);
1151 /// let values = unsafe { values.assume_init() };
1152 ///
1153 /// assert_eq!(*values, [0, 0, 0])
1154 /// ```
1155 ///
1156 /// [zeroed]: mem::MaybeUninit::zeroed
1157 #[cfg(not(no_global_oom_handling))]
1158 #[inline]
1159 #[unstable(feature = "new_uninit", issue = "63291")]
1160 #[must_use]
1161 pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
1162 unsafe {
1163 Arc::from_ptr(Arc::allocate_for_layout(
1164 Layout::array::<T>(len).unwrap(),
1165 |layout| Global.allocate_zeroed(layout),
1166 |mem| {
1167 ptr::slice_from_raw_parts_mut(mem as *mut T, len)
1168 as *mut ArcInner<[mem::MaybeUninit<T>]>
1169 },
1170 ))
1171 }
1172 }
1173}
1174
1175impl<T, A: Allocator> Arc<[T], A> {
1176 /// Constructs a new atomically reference-counted slice with uninitialized contents in the
1177 /// provided allocator.
1178 ///
1179 /// # Examples
1180 ///
1181 /// ```
1182 /// #![feature(new_uninit)]
1183 /// #![feature(get_mut_unchecked)]
1184 /// #![feature(allocator_api)]
1185 ///
1186 /// use std::sync::Arc;
1187 /// use std::alloc::System;
1188 ///
1189 /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System);
1190 ///
/// let values = unsafe {
///     // Deferred initialization:
///     Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1);
///     Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2);
///     Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3);
///
///     values.assume_init()
/// };
1199 ///
1200 /// assert_eq!(*values, [1, 2, 3])
1201 /// ```
1202 #[cfg(not(no_global_oom_handling))]
1203 #[unstable(feature = "new_uninit", issue = "63291")]
1204 #[inline]
1205 pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1206 unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) }
1207 }
1208
1209 /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
1210 /// filled with `0` bytes, in the provided allocator.
1211 ///
1212 /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
1213 /// incorrect usage of this method.
1214 ///
1215 /// # Examples
1216 ///
1217 /// ```
1218 /// #![feature(new_uninit)]
1219 /// #![feature(allocator_api)]
1220 ///
1221 /// use std::sync::Arc;
1222 /// use std::alloc::System;
1223 ///
1224 /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System);
1225 /// let values = unsafe { values.assume_init() };
1226 ///
1227 /// assert_eq!(*values, [0, 0, 0])
1228 /// ```
1229 ///
1230 /// [zeroed]: mem::MaybeUninit::zeroed
1231 #[cfg(not(no_global_oom_handling))]
1232 #[unstable(feature = "new_uninit", issue = "63291")]
1233 #[inline]
1234 pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit<T>], A> {
1235 unsafe {
1236 Arc::from_ptr_in(
1237 Arc::allocate_for_layout(
1238 Layout::array::<T>(len).unwrap(),
1239 |layout| alloc.allocate_zeroed(layout),
1240 |mem| {
1241 ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len)
1242 as *mut ArcInner<[mem::MaybeUninit<T>]>
1243 },
1244 ),
1245 alloc,
1246 )
1247 }
1248 }
1249}
1250
1251impl<T, A: Allocator> Arc<mem::MaybeUninit<T>, A> {
1252 /// Converts to `Arc<T>`.
1253 ///
1254 /// # Safety
1255 ///
1256 /// As with [`MaybeUninit::assume_init`],
1257 /// it is up to the caller to guarantee that the inner value
1258 /// really is in an initialized state.
1259 /// Calling this when the content is not yet fully initialized
1260 /// causes immediate undefined behavior.
1261 ///
1262 /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1263 ///
1264 /// # Examples
1265 ///
1266 /// ```
1267 /// #![feature(new_uninit)]
1268 /// #![feature(get_mut_unchecked)]
1269 ///
1270 /// use std::sync::Arc;
1271 ///
1272 /// let mut five = Arc::<u32>::new_uninit();
1273 ///
1274 /// // Deferred initialization:
1275 /// Arc::get_mut(&mut five).unwrap().write(5);
1276 ///
1277 /// let five = unsafe { five.assume_init() };
1278 ///
1279 /// assert_eq!(*five, 5)
1280 /// ```
1281 #[unstable(feature = "new_uninit", issue = "63291")]
1282 #[must_use = "`self` will be dropped if the result is not used"]
1283 #[inline]
1284 pub unsafe fn assume_init(self) -> Arc<T, A> {
1285 let (ptr, alloc) = self.internal_into_inner_with_allocator();
1286 unsafe { Arc::from_inner_in(ptr.cast(), alloc) }
1287 }
1288}
1289
1290impl<T, A: Allocator> Arc<[mem::MaybeUninit<T>], A> {
1291 /// Converts to `Arc<[T]>`.
1292 ///
1293 /// # Safety
1294 ///
1295 /// As with [`MaybeUninit::assume_init`],
1296 /// it is up to the caller to guarantee that the inner value
1297 /// really is in an initialized state.
1298 /// Calling this when the content is not yet fully initialized
1299 /// causes immediate undefined behavior.
1300 ///
1301 /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
1302 ///
1303 /// # Examples
1304 ///
1305 /// ```
1306 /// #![feature(new_uninit)]
1307 /// #![feature(get_mut_unchecked)]
1308 ///
1309 /// use std::sync::Arc;
1310 ///
1311 /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
1312 ///
1313 /// // Deferred initialization:
1314 /// let data = Arc::get_mut(&mut values).unwrap();
1315 /// data[0].write(1);
1316 /// data[1].write(2);
1317 /// data[2].write(3);
1318 ///
1319 /// let values = unsafe { values.assume_init() };
1320 ///
1321 /// assert_eq!(*values, [1, 2, 3])
1322 /// ```
1323 #[unstable(feature = "new_uninit", issue = "63291")]
1324 #[must_use = "`self` will be dropped if the result is not used"]
1325 #[inline]
1326 pub unsafe fn assume_init(self) -> Arc<[T], A> {
1327 let (ptr, alloc) = self.internal_into_inner_with_allocator();
1328 unsafe { Arc::from_ptr_in(ptr.as_ptr() as _, alloc) }
1329 }
1330}
1331
1332impl<T: ?Sized> Arc<T> {
1333 /// Constructs an `Arc<T>` from a raw pointer.
1334 ///
1335 /// The raw pointer must have been previously returned by a call to
1336 /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
1337 /// alignment as `T`. This is trivially true if `U` is `T`.
1338 /// Note that if `U` is not `T` but has the same size and alignment, this is
1339 /// basically like transmuting references of different types. See
1340 /// [`mem::transmute`][transmute] for more information on what
1341 /// restrictions apply in this case.
1342 ///
1343 /// The user of `from_raw` has to make sure a specific value of `T` is only
1344 /// dropped once.
1345 ///
1346 /// This function is unsafe because improper use may lead to memory unsafety,
1347 /// even if the returned `Arc<T>` is never accessed.
1348 ///
1349 /// [into_raw]: Arc::into_raw
1350 /// [transmute]: core::mem::transmute
1351 ///
1352 /// # Examples
1353 ///
1354 /// ```
1355 /// use std::sync::Arc;
1356 ///
1357 /// let x = Arc::new("hello".to_owned());
1358 /// let x_ptr = Arc::into_raw(x);
1359 ///
1360 /// unsafe {
1361 /// // Convert back to an `Arc` to prevent leak.
1362 /// let x = Arc::from_raw(x_ptr);
1363 /// assert_eq!(&*x, "hello");
1364 ///
1365 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1366 /// }
1367 ///
1368 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1369 /// ```
1370 #[inline]
1371 #[stable(feature = "rc_raw", since = "1.17.0")]
1372 pub unsafe fn from_raw(ptr: *const T) -> Self {
1373 unsafe { Arc::from_raw_in(ptr, Global) }
1374 }
1375
1376 /// Increments the strong reference count on the `Arc<T>` associated with the
1377 /// provided pointer by one.
1378 ///
1379 /// # Safety
1380 ///
1381 /// The pointer must have been obtained through `Arc::into_raw`, and the
1382 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1383 /// least 1) for the duration of this method.
1384 ///
1385 /// # Examples
1386 ///
1387 /// ```
1388 /// use std::sync::Arc;
1389 ///
1390 /// let five = Arc::new(5);
1391 ///
1392 /// unsafe {
1393 /// let ptr = Arc::into_raw(five);
1394 /// Arc::increment_strong_count(ptr);
1395 ///
1396 /// // This assertion is deterministic because we haven't shared
1397 /// // the `Arc` between threads.
1398 /// let five = Arc::from_raw(ptr);
1399 /// assert_eq!(2, Arc::strong_count(&five));
1400 /// }
1401 /// ```
1402 #[inline]
1403 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1404 pub unsafe fn increment_strong_count(ptr: *const T) {
1405 unsafe { Arc::increment_strong_count_in(ptr, Global) }
1406 }
1407
1408 /// Decrements the strong reference count on the `Arc<T>` associated with the
1409 /// provided pointer by one.
1410 ///
1411 /// # Safety
1412 ///
1413 /// The pointer must have been obtained through `Arc::into_raw`, and the
1414 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1415 /// least 1) when invoking this method. This method can be used to release the final
1416 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1417 /// released.
1418 ///
1419 /// # Examples
1420 ///
1421 /// ```
1422 /// use std::sync::Arc;
1423 ///
1424 /// let five = Arc::new(5);
1425 ///
1426 /// unsafe {
1427 /// let ptr = Arc::into_raw(five);
1428 /// Arc::increment_strong_count(ptr);
1429 ///
1430 /// // Those assertions are deterministic because we haven't shared
1431 /// // the `Arc` between threads.
1432 /// let five = Arc::from_raw(ptr);
1433 /// assert_eq!(2, Arc::strong_count(&five));
1434 /// Arc::decrement_strong_count(ptr);
1435 /// assert_eq!(1, Arc::strong_count(&five));
1436 /// }
1437 /// ```
1438 #[inline]
1439 #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
1440 pub unsafe fn decrement_strong_count(ptr: *const T) {
1441 unsafe { Arc::decrement_strong_count_in(ptr, Global) }
1442 }
1443}
1444
1445impl<T: ?Sized, A: Allocator> Arc<T, A> {
1446 /// Consumes the `Arc`, returning the wrapped pointer.
1447 ///
1448 /// To avoid a memory leak the pointer must be converted back to an `Arc` using
1449 /// [`Arc::from_raw`].
1450 ///
1451 /// # Examples
1452 ///
1453 /// ```
1454 /// use std::sync::Arc;
1455 ///
1456 /// let x = Arc::new("hello".to_owned());
1457 /// let x_ptr = Arc::into_raw(x);
1458 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1459 /// ```
1460 #[must_use = "losing the pointer will leak memory"]
1461 #[stable(feature = "rc_raw", since = "1.17.0")]
1462 #[rustc_never_returns_null_ptr]
1463 pub fn into_raw(this: Self) -> *const T {
1464 let ptr = Self::as_ptr(&this);
1465 mem::forget(this);
1466 ptr
1467 }
1468
1469 /// Provides a raw pointer to the data.
1470 ///
1471 /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
1472 /// as long as there are strong counts in the `Arc`.
1473 ///
1474 /// # Examples
1475 ///
1476 /// ```
1477 /// use std::sync::Arc;
1478 ///
1479 /// let x = Arc::new("hello".to_owned());
1480 /// let y = Arc::clone(&x);
1481 /// let x_ptr = Arc::as_ptr(&x);
1482 /// assert_eq!(x_ptr, Arc::as_ptr(&y));
1483 /// assert_eq!(unsafe { &*x_ptr }, "hello");
1484 /// ```
1485 #[must_use]
1486 #[stable(feature = "rc_as_ptr", since = "1.45.0")]
1487 #[rustc_never_returns_null_ptr]
1488 pub fn as_ptr(this: &Self) -> *const T {
1489 let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
1490
1491 // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
1492 // this is required to retain raw/mut provenance such that e.g. `get_mut` can
1493 // write through the pointer after the Rc is recovered through `from_raw`.
1494 unsafe { ptr::addr_of_mut!((*ptr).data) }
1495 }
1496
1497 /// Constructs an `Arc<T, A>` from a raw pointer.
1498 ///
1499 /// The raw pointer must have been previously returned by a call to
1500 /// [`Arc<U, A>::into_raw`][into_raw] where `U` must have the same size and
1501 /// alignment as `T`. This is trivially true if `U` is `T`.
1502 /// Note that if `U` is not `T` but has the same size and alignment, this is
1503 /// basically like transmuting references of different types. See
1504 /// [`mem::transmute`] for more information on what
1505 /// restrictions apply in this case.
1506 ///
1507 /// The raw pointer must point to a block of memory allocated by `alloc`
1508 ///
1509 /// The user of `from_raw` has to make sure a specific value of `T` is only
1510 /// dropped once.
1511 ///
1512 /// This function is unsafe because improper use may lead to memory unsafety,
1513 /// even if the returned `Arc<T>` is never accessed.
1514 ///
1515 /// [into_raw]: Arc::into_raw
1516 ///
1517 /// # Examples
1518 ///
1519 /// ```
1520 /// #![feature(allocator_api)]
1521 ///
1522 /// use std::sync::Arc;
1523 /// use std::alloc::System;
1524 ///
1525 /// let x = Arc::new_in("hello".to_owned(), System);
1526 /// let x_ptr = Arc::into_raw(x);
1527 ///
1528 /// unsafe {
1529 /// // Convert back to an `Arc` to prevent leak.
1530 /// let x = Arc::from_raw_in(x_ptr, System);
1531 /// assert_eq!(&*x, "hello");
1532 ///
1533 /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
1534 /// }
1535 ///
1536 /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
1537 /// ```
1538 #[inline]
1539 #[unstable(feature = "allocator_api", issue = "32838")]
1540 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
1541 unsafe {
1542 let offset = data_offset(ptr);
1543
1544 // Reverse the offset to find the original ArcInner.
1545 let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
1546
1547 Self::from_ptr_in(arc_ptr, alloc)
1548 }
1549 }
1550
1551 /// Creates a new [`Weak`] pointer to this allocation.
1552 ///
1553 /// # Examples
1554 ///
1555 /// ```
1556 /// use std::sync::Arc;
1557 ///
1558 /// let five = Arc::new(5);
1559 ///
1560 /// let weak_five = Arc::downgrade(&five);
1561 /// ```
1562 #[must_use = "this returns a new `Weak` pointer, \
1563 without modifying the original `Arc`"]
1564 #[stable(feature = "arc_weak", since = "1.4.0")]
1565 pub fn downgrade(this: &Self) -> Weak<T, A>
1566 where
1567 A: Clone,
1568 {
1569 // This Relaxed is OK because we're checking the value in the CAS
1570 // below.
1571 let mut cur = this.inner().weak.load(Relaxed);
1572
1573 loop {
1574 // check if the weak counter is currently "locked"; if so, spin.
1575 if cur == usize::MAX {
1576 hint::spin_loop();
1577 cur = this.inner().weak.load(Relaxed);
1578 continue;
1579 }
1580
1581 // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
1582 assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1583
1584 // NOTE: this code currently ignores the possibility of overflow
1585 // into usize::MAX; in general both Rc and Arc need to be adjusted
1586 // to deal with overflow.
1587
1588 // Unlike with Clone(), we need this to be an Acquire read to
1589 // synchronize with the write coming from `is_unique`, so that the
1590 // events prior to that write happen before this read.
1591 match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
1592 Ok(_) => {
1593 // Make sure we do not create a dangling Weak
1594 debug_assert!(!is_dangling(this.ptr.as_ptr()));
1595 return Weak { ptr: this.ptr, alloc: this.alloc.clone() };
1596 }
1597 Err(old) => cur = old,
1598 }
1599 }
1600 }
1601
1602 /// Gets the number of [`Weak`] pointers to this allocation.
1603 ///
1604 /// # Safety
1605 ///
1606 /// This method by itself is safe, but using it correctly requires extra care.
1607 /// Another thread can change the weak count at any time,
1608 /// including potentially between calling this method and acting on the result.
1609 ///
1610 /// # Examples
1611 ///
1612 /// ```
1613 /// use std::sync::Arc;
1614 ///
1615 /// let five = Arc::new(5);
1616 /// let _weak_five = Arc::downgrade(&five);
1617 ///
1618 /// // This assertion is deterministic because we haven't shared
1619 /// // the `Arc` or `Weak` between threads.
1620 /// assert_eq!(1, Arc::weak_count(&five));
1621 /// ```
1622 #[inline]
1623 #[must_use]
1624 #[stable(feature = "arc_counts", since = "1.15.0")]
1625 pub fn weak_count(this: &Self) -> usize {
1626 let cnt = this.inner().weak.load(Relaxed);
1627 // If the weak count is currently locked, the value of the
1628 // count was 0 just before taking the lock.
1629 if cnt == usize::MAX { 0 } else { cnt - 1 }
1630 }
1631
1632 /// Gets the number of strong (`Arc`) pointers to this allocation.
1633 ///
1634 /// # Safety
1635 ///
1636 /// This method by itself is safe, but using it correctly requires extra care.
1637 /// Another thread can change the strong count at any time,
1638 /// including potentially between calling this method and acting on the result.
1639 ///
1640 /// # Examples
1641 ///
1642 /// ```
1643 /// use std::sync::Arc;
1644 ///
1645 /// let five = Arc::new(5);
1646 /// let _also_five = Arc::clone(&five);
1647 ///
1648 /// // This assertion is deterministic because we haven't shared
1649 /// // the `Arc` between threads.
1650 /// assert_eq!(2, Arc::strong_count(&five));
1651 /// ```
1652 #[inline]
1653 #[must_use]
1654 #[stable(feature = "arc_counts", since = "1.15.0")]
1655 pub fn strong_count(this: &Self) -> usize {
1656 this.inner().strong.load(Relaxed)
1657 }
1658
1659 /// Increments the strong reference count on the `Arc<T>` associated with the
1660 /// provided pointer by one.
1661 ///
1662 /// # Safety
1663 ///
1664 /// The pointer must have been obtained through `Arc::into_raw`, and the
1665 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1666    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
1667 /// allocated by `alloc`.
1668 ///
1669 /// # Examples
1670 ///
1671 /// ```
1672 /// #![feature(allocator_api)]
1673 ///
1674 /// use std::sync::Arc;
1675 /// use std::alloc::System;
1676 ///
1677 /// let five = Arc::new_in(5, System);
1678 ///
1679 /// unsafe {
1680 /// let ptr = Arc::into_raw(five);
1681 /// Arc::increment_strong_count_in(ptr, System);
1682 ///
1683 /// // This assertion is deterministic because we haven't shared
1684 /// // the `Arc` between threads.
1685 /// let five = Arc::from_raw_in(ptr, System);
1686 /// assert_eq!(2, Arc::strong_count(&five));
1687 /// }
1688 /// ```
1689 #[inline]
1690 #[unstable(feature = "allocator_api", issue = "32838")]
1691 pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A)
1692 where
1693 A: Clone,
1694 {
1695 // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
1696 let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) };
1697 // Now increase refcount, but don't drop new refcount either
1698 let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
1699 }
1700
1701 /// Decrements the strong reference count on the `Arc<T>` associated with the
1702 /// provided pointer by one.
1703 ///
1704 /// # Safety
1705 ///
1706 /// The pointer must have been obtained through `Arc::into_raw`, the
1707 /// associated `Arc` instance must be valid (i.e. the strong count must be at
1708 /// least 1) when invoking this method, and `ptr` must point to a block of memory
1709 /// allocated by `alloc`. This method can be used to release the final
1710 /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
1711 /// released.
1712 ///
1713 /// # Examples
1714 ///
1715 /// ```
1716 /// #![feature(allocator_api)]
1717 ///
1718 /// use std::sync::Arc;
1719 /// use std::alloc::System;
1720 ///
1721 /// let five = Arc::new_in(5, System);
1722 ///
1723 /// unsafe {
1724 /// let ptr = Arc::into_raw(five);
1725 /// Arc::increment_strong_count_in(ptr, System);
1726 ///
1727 /// // Those assertions are deterministic because we haven't shared
1728 /// // the `Arc` between threads.
1729 /// let five = Arc::from_raw_in(ptr, System);
1730 /// assert_eq!(2, Arc::strong_count(&five));
1731 /// Arc::decrement_strong_count_in(ptr, System);
1732 /// assert_eq!(1, Arc::strong_count(&five));
1733 /// }
1734 /// ```
1735 #[inline]
1736 #[unstable(feature = "allocator_api", issue = "32838")]
1737 pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) {
1738 unsafe { drop(Arc::from_raw_in(ptr, alloc)) };
1739 }
1740
1741 #[inline]
1742 fn inner(&self) -> &ArcInner<T> {
1743 // This unsafety is ok because while this arc is alive we're guaranteed
1744 // that the inner pointer is valid. Furthermore, we know that the
1745 // `ArcInner` structure itself is `Sync` because the inner data is
1746 // `Sync` as well, so we're ok loaning out an immutable pointer to these
1747 // contents.
1748 unsafe { self.ptr.as_ref() }
1749 }
1750
1751 // Non-inlined part of `drop`.
1752 #[inline(never)]
1753 unsafe fn drop_slow(&mut self) {
1754 // Destroy the data at this time, even though we must not free the box
1755 // allocation itself (there might still be weak pointers lying around).
1756 unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
1757
1758 // Drop the weak ref collectively held by all strong references
1759 // Take a reference to `self.alloc` instead of cloning because 1. it'll
1760 // last long enough, and 2. you should be able to drop `Arc`s with
1761 // unclonable allocators
1762 drop(Weak { ptr: self.ptr, alloc: &self.alloc });
1763 }
1764
1765 /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
1766 /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1767 ///
1768 /// # Examples
1769 ///
1770 /// ```
1771 /// use std::sync::Arc;
1772 ///
1773 /// let five = Arc::new(5);
1774 /// let same_five = Arc::clone(&five);
1775 /// let other_five = Arc::new(5);
1776 ///
1777 /// assert!(Arc::ptr_eq(&five, &same_five));
1778 /// assert!(!Arc::ptr_eq(&five, &other_five));
1779 /// ```
1780 ///
1781 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1782 #[inline]
1783 #[must_use]
1784 #[stable(feature = "ptr_eq", since = "1.17.0")]
1785 pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1786 ptr::addr_eq(this.ptr.as_ptr(), other.ptr.as_ptr())
1787 }
1788}
1789
1790impl<T: ?Sized> Arc<T> {
1791 /// Allocates an `ArcInner<T>` with sufficient space for
1792 /// a possibly-unsized inner value where the value has the layout provided.
1793 ///
1794 /// The function `mem_to_arcinner` is called with the data pointer
1795    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1796 #[cfg(not(no_global_oom_handling))]
1797 unsafe fn allocate_for_layout(
1798 value_layout: Layout,
1799 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1800 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1801 ) -> *mut ArcInner<T> {
1802 let layout = arcinner_layout_for_value_layout(value_layout);
1803
1804 let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
1805
1806 unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
1807 }
1808
1809 /// Allocates an `ArcInner<T>` with sufficient space for
1810 /// a possibly-unsized inner value where the value has the layout provided,
1811 /// returning an error if allocation fails.
1812 ///
1813 /// The function `mem_to_arcinner` is called with the data pointer
1814    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
1815 unsafe fn try_allocate_for_layout(
1816 value_layout: Layout,
1817 allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
1818 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1819 ) -> Result<*mut ArcInner<T>, AllocError> {
1820 let layout = arcinner_layout_for_value_layout(value_layout);
1821
1822 let ptr = allocate(layout)?;
1823
1824 let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
1825
1826 Ok(inner)
1827 }
1828
1829 unsafe fn initialize_arcinner(
1830 ptr: NonNull<[u8]>,
1831 layout: Layout,
1832 mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1833 ) -> *mut ArcInner<T> {
1834 let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
1835 debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout);
1836
1837 unsafe {
1838 ptr::addr_of_mut!((*inner).strong).write(atomic::AtomicUsize::new(1));
1839 ptr::addr_of_mut!((*inner).weak).write(atomic::AtomicUsize::new(1));
1840 }
1841
1842 inner
1843 }
1844}
1845
1846impl<T: ?Sized, A: Allocator> Arc<T, A> {
1847 /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
1848 #[inline]
1849 #[cfg(not(no_global_oom_handling))]
1850 unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner<T> {
1851 // Allocate for the `ArcInner<T>` using the given value.
1852 unsafe {
1853 Arc::allocate_for_layout(
1854 Layout::for_value_raw(ptr),
1855 |layout| alloc.allocate(layout),
1856 |mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
1857 )
1858 }
1859 }
1860
1861 #[cfg(not(no_global_oom_handling))]
1862 fn from_box_in(src: Box<T, A>) -> Arc<T, A> {
1863 unsafe {
1864 let value_size = size_of_val(&*src);
1865 let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src));
1866
1867 // Copy value as bytes
1868 ptr::copy_nonoverlapping(
1869 &*src as *const T as *const u8,
1870 ptr::addr_of_mut!((*ptr).data) as *mut u8,
1871 value_size,
1872 );
1873
1874 // Free the allocation without dropping its contents
1875 let (bptr, alloc) = Box::into_raw_with_allocator(src);
1876 let src = Box::from_raw_in(bptr as *mut mem::ManuallyDrop<T>, alloc.by_ref());
1877 drop(src);
1878
1879 Self::from_ptr_in(ptr, alloc)
1880 }
1881 }
1882}
1883
1884impl<T> Arc<[T]> {
1885 /// Allocates an `ArcInner<[T]>` with the given length.
1886 #[cfg(not(no_global_oom_handling))]
1887 unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
1888 unsafe {
1889 Self::allocate_for_layout(
1890 Layout::array::<T>(len).unwrap(),
1891 |layout| Global.allocate(layout),
1892 |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
1893 )
1894 }
1895 }
1896
1897 /// Copy elements from slice into newly allocated `Arc<[T]>`
1898 ///
1899 /// Unsafe because the caller must either take ownership or bind `T: Copy`.
1900 #[cfg(not(no_global_oom_handling))]
1901 unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
1902 unsafe {
1903 let ptr = Self::allocate_for_slice(v.len());
1904
1905 ptr::copy_nonoverlapping(v.as_ptr(), ptr::addr_of_mut!((*ptr).data) as *mut T, v.len());
1906
1907 Self::from_ptr(ptr)
1908 }
1909 }
1910
1911 /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
1912 ///
1913 /// Behavior is undefined should the size be wrong.
1914 #[cfg(not(no_global_oom_handling))]
1915 unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
1916 // Panic guard while cloning T elements.
1917 // In the event of a panic, elements that have been written
1918 // into the new ArcInner will be dropped, then the memory freed.
1919 struct Guard<T> {
1920 mem: NonNull<u8>,
1921 elems: *mut T,
1922 layout: Layout,
1923 n_elems: usize,
1924 }
1925
1926 impl<T> Drop for Guard<T> {
1927 fn drop(&mut self) {
1928 unsafe {
1929 let slice = from_raw_parts_mut(self.elems, self.n_elems);
1930 ptr::drop_in_place(slice);
1931
1932 Global.deallocate(self.mem, self.layout);
1933 }
1934 }
1935 }
1936
1937 unsafe {
1938 let ptr = Self::allocate_for_slice(len);
1939
1940 let mem = ptr as *mut _ as *mut u8;
1941 let layout = Layout::for_value_raw(ptr);
1942
1943 // Pointer to first element
1944 let elems = ptr::addr_of_mut!((*ptr).data) as *mut T;
1945
1946 let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
1947
1948 for (i, item) in iter.enumerate() {
1949 ptr::write(elems.add(i), item);
1950 guard.n_elems += 1;
1951 }
1952
1953 // All clear. Forget the guard so it doesn't free the new ArcInner.
1954 mem::forget(guard);
1955
1956 Self::from_ptr(ptr)
1957 }
1958 }
1959}
1960
1961impl<T, A: Allocator> Arc<[T], A> {
1962 /// Allocates an `ArcInner<[T]>` with the given length.
1963 #[inline]
1964 #[cfg(not(no_global_oom_handling))]
1965 unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> {
1966 unsafe {
1967 Arc::allocate_for_layout(
1968                Layout::array::<T>(len).unwrap(),
1969                |layout| alloc.allocate(layout),
1970                |mem| ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut ArcInner<[T]>,
1971 )
1972 }
1973 }
1974}
1975
1976/// Specialization trait used for `From<&[T]>`.
1977#[cfg(not(no_global_oom_handling))]
1978trait ArcFromSlice<T> {
1979 fn from_slice(slice: &[T]) -> Self;
1980}
1981
1982#[cfg(not(no_global_oom_handling))]
1983impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
1984 #[inline]
1985 default fn from_slice(v: &[T]) -> Self {
1986        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
1987 }
1988}
1989
1990#[cfg(not(no_global_oom_handling))]
1991impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
1992 #[inline]
1993 fn from_slice(v: &[T]) -> Self {
1994 unsafe { Arc::copy_from_slice(v) }
1995 }
1996}
1997
1998#[stable(feature = "rust1", since = "1.0.0")]
1999impl<T: ?Sized, A: Allocator + Clone> Clone for Arc<T, A> {
2000 /// Makes a clone of the `Arc` pointer.
2001 ///
2002 /// This creates another pointer to the same allocation, increasing the
2003 /// strong reference count.
2004 ///
2005 /// # Examples
2006 ///
2007 /// ```
2008 /// use std::sync::Arc;
2009 ///
2010 /// let five = Arc::new(5);
2011 ///
2012 /// let _ = Arc::clone(&five);
2013 /// ```
2014 #[inline]
2015 fn clone(&self) -> Arc<T, A> {
2016 // Using a relaxed ordering is alright here, as knowledge of the
2017 // original reference prevents other threads from erroneously deleting
2018 // the object.
2019 //
2020        // As explained in the [Boost documentation][1], increasing the
2021 // reference counter can always be done with memory_order_relaxed: New
2022 // references to an object can only be formed from an existing
2023 // reference, and passing an existing reference from one thread to
2024 // another must already provide any required synchronization.
2025 //
2026 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2027 let old_size = self.inner().strong.fetch_add(1, Relaxed);
2028
2029 // However we need to guard against massive refcounts in case someone is `mem::forget`ing
2030 // Arcs. If we don't do this the count can overflow and users will use-after free. This
2031 // branch will never be taken in any realistic program. We abort because such a program is
2032 // incredibly degenerate, and we don't care to support it.
2033 //
2034 // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
2035 // But we do that check *after* having done the increment, so there is a chance here that
2036 // the worst already happened and we actually do overflow the `usize` counter. However, that
2037 // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
2038 // above and the `abort` below, which seems exceedingly unlikely.
2039 //
2040 // This is a global invariant, and also applies when using a compare-exchange loop to increment
2041 // counters in other methods.
2042 // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
2043 // and then overflow using a few `fetch_add`s.
2044 if old_size > MAX_REFCOUNT {
2045 abort();
2046 }
2047
2048 unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) }
2049 }
2050}
2051
2052#[stable(feature = "rust1", since = "1.0.0")]
2053impl<T: ?Sized, A: Allocator> Deref for Arc<T, A> {
2054 type Target = T;
2055
2056 #[inline]
2057 fn deref(&self) -> &T {
2058 &self.inner().data
2059 }
2060}
2061
2062#[unstable(feature = "receiver_trait", issue = "none")]
2063impl<T: ?Sized> Receiver for Arc<T> {}
2064
2065impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
2066 /// Makes a mutable reference into the given `Arc`.
2067 ///
2068 /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
2069 /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
2070 /// referred to as clone-on-write.
2071 ///
2072 /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
2073 /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
2074 /// be cloned.
2075 ///
2076 /// See also [`get_mut`], which will fail rather than cloning the inner value
2077 /// or dissociating [`Weak`] pointers.
2078 ///
2079 /// [`clone`]: Clone::clone
2080 /// [`get_mut`]: Arc::get_mut
2081 ///
2082 /// # Examples
2083 ///
2084 /// ```
2085 /// use std::sync::Arc;
2086 ///
2087 /// let mut data = Arc::new(5);
2088 ///
2089 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2090 /// let mut other_data = Arc::clone(&data); // Won't clone inner data
2091 /// *Arc::make_mut(&mut data) += 1; // Clones inner data
2092 /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
2093 /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
2094 ///
2095 /// // Now `data` and `other_data` point to different allocations.
2096 /// assert_eq!(*data, 8);
2097 /// assert_eq!(*other_data, 12);
2098 /// ```
2099 ///
2100 /// [`Weak`] pointers will be dissociated:
2101 ///
2102 /// ```
2103 /// use std::sync::Arc;
2104 ///
2105 /// let mut data = Arc::new(75);
2106 /// let weak = Arc::downgrade(&data);
2107 ///
2108 /// assert!(75 == *data);
2109 /// assert!(75 == *weak.upgrade().unwrap());
2110 ///
2111 /// *Arc::make_mut(&mut data) += 1;
2112 ///
2113 /// assert!(76 == *data);
2114 /// assert!(weak.upgrade().is_none());
2115 /// ```
2116 #[cfg(not(no_global_oom_handling))]
2117 #[inline]
2118 #[stable(feature = "arc_unique", since = "1.4.0")]
2119 pub fn make_mut(this: &mut Self) -> &mut T {
2120 // Note that we hold both a strong reference and a weak reference.
2121 // Thus, releasing our strong reference only will not, by itself, cause
2122 // the memory to be deallocated.
2123 //
2124 // Use Acquire to ensure that we see any writes to `weak` that happen
2125 // before release writes (i.e., decrements) to `strong`. Since we hold a
2126 // weak count, there's no chance the ArcInner itself could be
2127 // deallocated.
2128 if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
2129 // Another strong pointer exists, so we must clone.
2130 // Pre-allocate memory to allow writing the cloned value directly.
2131 let mut arc = Self::new_uninit_in(this.alloc.clone());
2132 unsafe {
2133 let data = Arc::get_mut_unchecked(&mut arc);
2134 (**this).write_clone_into_raw(data.as_mut_ptr());
2135 *this = arc.assume_init();
2136 }
2137 } else if this.inner().weak.load(Relaxed) != 1 {
2138 // Relaxed suffices in the above because this is fundamentally an
2139 // optimization: we are always racing with weak pointers being
2140            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
2141
2142 // We removed the last strong ref, but there are additional weak
2143 // refs remaining. We'll move the contents to a new Arc, and
2144 // invalidate the other weak refs.
2145
2146 // Note that it is not possible for the read of `weak` to yield
2147 // usize::MAX (i.e., locked), since the weak count can only be
2148 // locked by a thread with a strong reference.
2149
2150 // Materialize our own implicit weak pointer, so that it can clean
2151 // up the ArcInner as needed.
2152 let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() };
2153
2154 // Can just steal the data, all that's left is Weaks
2155 let mut arc = Self::new_uninit_in(this.alloc.clone());
2156 unsafe {
2157 let data = Arc::get_mut_unchecked(&mut arc);
2158 data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
2159 ptr::write(this, arc.assume_init());
2160 }
2161 } else {
2162 // We were the sole reference of either kind; bump back up the
2163 // strong ref count.
2164 this.inner().strong.store(1, Release);
2165 }
2166
2167 // As with `get_mut()`, the unsafety is ok because our reference was
2168 // either unique to begin with, or became one upon cloning the contents.
2169 unsafe { Self::get_mut_unchecked(this) }
2170 }
2171
2172 /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
2173 /// clone.
2174 ///
2175 /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
2176 /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
2177 ///
2178 /// # Examples
2179 ///
2180 /// ```
2181 /// # use std::{ptr, sync::Arc};
2182 /// let inner = String::from("test");
2183 /// let ptr = inner.as_ptr();
2184 ///
2185 /// let arc = Arc::new(inner);
2186 /// let inner = Arc::unwrap_or_clone(arc);
2187 /// // The inner value was not cloned
2188 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2189 ///
2190 /// let arc = Arc::new(inner);
2191 /// let arc2 = arc.clone();
2192 /// let inner = Arc::unwrap_or_clone(arc);
2193 /// // Because there were 2 references, we had to clone the inner value.
2194 /// assert!(!ptr::eq(ptr, inner.as_ptr()));
2195 /// // `arc2` is the last reference, so when we unwrap it we get back
2196 /// // the original `String`.
2197 /// let inner = Arc::unwrap_or_clone(arc2);
2198 /// assert!(ptr::eq(ptr, inner.as_ptr()));
2199 /// ```
2200 #[inline]
2201 #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
2202 pub fn unwrap_or_clone(this: Self) -> T {
2203 Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
2204 }
2205}
2206
2207impl<T: ?Sized, A: Allocator> Arc<T, A> {
2208 /// Returns a mutable reference into the given `Arc`, if there are
2209 /// no other `Arc` or [`Weak`] pointers to the same allocation.
2210 ///
2211 /// Returns [`None`] otherwise, because it is not safe to
2212 /// mutate a shared value.
2213 ///
2214 /// See also [`make_mut`][make_mut], which will [`clone`][clone]
2215 /// the inner value when there are other `Arc` pointers.
2216 ///
2217 /// [make_mut]: Arc::make_mut
2218 /// [clone]: Clone::clone
2219 ///
2220 /// # Examples
2221 ///
2222 /// ```
2223 /// use std::sync::Arc;
2224 ///
2225 /// let mut x = Arc::new(3);
2226 /// *Arc::get_mut(&mut x).unwrap() = 4;
2227 /// assert_eq!(*x, 4);
2228 ///
2229 /// let _y = Arc::clone(&x);
2230 /// assert!(Arc::get_mut(&mut x).is_none());
2231 /// ```
2232 #[inline]
2233 #[stable(feature = "arc_unique", since = "1.4.0")]
2234 pub fn get_mut(this: &mut Self) -> Option<&mut T> {
2235 if this.is_unique() {
2236 // This unsafety is ok because we're guaranteed that the pointer
2237 // returned is the *only* pointer that will ever be returned to T. Our
2238 // reference count is guaranteed to be 1 at this point, and we required
2239 // the Arc itself to be `mut`, so we're returning the only possible
2240 // reference to the inner data.
2241 unsafe { Some(Arc::get_mut_unchecked(this)) }
2242 } else {
2243 None
2244 }
2245 }
2246
2247 /// Returns a mutable reference into the given `Arc`,
2248 /// without any check.
2249 ///
2250 /// See also [`get_mut`], which is safe and does appropriate checks.
2251 ///
2252 /// [`get_mut`]: Arc::get_mut
2253 ///
2254 /// # Safety
2255 ///
2256 /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
2257 /// they must not be dereferenced or have active borrows for the duration
2258 /// of the returned borrow, and their inner type must be exactly the same as the
2259    /// inner type of this `Arc` (including lifetimes). This is trivially the case if no
2260 /// such pointers exist, for example immediately after `Arc::new`.
2261 ///
2262 /// # Examples
2263 ///
2264 /// ```
2265 /// #![feature(get_mut_unchecked)]
2266 ///
2267 /// use std::sync::Arc;
2268 ///
2269 /// let mut x = Arc::new(String::new());
2270 /// unsafe {
2271 /// Arc::get_mut_unchecked(&mut x).push_str("foo")
2272 /// }
2273 /// assert_eq!(*x, "foo");
2274 /// ```
2275 /// Other `Arc` pointers to the same allocation must be to the same type.
2276 /// ```no_run
2277 /// #![feature(get_mut_unchecked)]
2278 ///
2279 /// use std::sync::Arc;
2280 ///
2281 /// let x: Arc<str> = Arc::from("Hello, world!");
2282 /// let mut y: Arc<[u8]> = x.clone().into();
2283 /// unsafe {
2284 /// // this is Undefined Behavior, because x's inner type is str, not [u8]
2285 /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
2286 /// }
2287 /// println!("{}", &*x); // Invalid UTF-8 in a str
2288 /// ```
2289 /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
2290 /// ```no_run
2291 /// #![feature(get_mut_unchecked)]
2292 ///
2293 /// use std::sync::Arc;
2294 ///
2295 /// let x: Arc<&str> = Arc::new("Hello, world!");
2296 /// {
2297 /// let s = String::from("Oh, no!");
2298 /// let mut y: Arc<&str> = x.clone().into();
2299 /// unsafe {
2300 /// // this is Undefined Behavior, because x's inner type
2301 /// // is &'long str, not &'short str
2302 /// *Arc::get_mut_unchecked(&mut y) = &s;
2303 /// }
2304 /// }
2305 /// println!("{}", &*x); // Use-after-free
2306 /// ```
2307 #[inline]
2308 #[unstable(feature = "get_mut_unchecked", issue = "63292")]
2309 pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
2310 // We are careful to *not* create a reference covering the "count" fields, as
2311 // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
2312 unsafe { &mut (*this.ptr.as_ptr()).data }
2313 }
2314
2315 /// Determine whether this is the unique reference (including weak refs) to
2316 /// the underlying data.
2317 ///
2318 /// Note that this requires locking the weak ref count.
2319 fn is_unique(&mut self) -> bool {
2320 // lock the weak pointer count if we appear to be the sole weak pointer
2321 // holder.
2322 //
2323 // The acquire label here ensures a happens-before relationship with any
2324 // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
2325 // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
2326 // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
2327 if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
2328 // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
2329 // counter in `drop` -- the only access that happens when any but the last reference
2330 // is being dropped.
2331 let unique = self.inner().strong.load(Acquire) == 1;
2332
2333 // The release write here synchronizes with a read in `downgrade`,
2334 // effectively preventing the above read of `strong` from happening
2335 // after the write.
2336 self.inner().weak.store(1, Release); // release the lock
2337 unique
2338 } else {
2339 false
2340 }
2341 }
2342}
2343
2344#[stable(feature = "rust1", since = "1.0.0")]
2345unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc<T, A> {
2346 /// Drops the `Arc`.
2347 ///
2348 /// This will decrement the strong reference count. If the strong reference
2349 /// count reaches zero then the only other references (if any) are
2350 /// [`Weak`], so we `drop` the inner value.
2351 ///
2352 /// # Examples
2353 ///
2354 /// ```
2355 /// use std::sync::Arc;
2356 ///
2357 /// struct Foo;
2358 ///
2359 /// impl Drop for Foo {
2360 /// fn drop(&mut self) {
2361 /// println!("dropped!");
2362 /// }
2363 /// }
2364 ///
2365 /// let foo = Arc::new(Foo);
2366 /// let foo2 = Arc::clone(&foo);
2367 ///
2368 /// drop(foo); // Doesn't print anything
2369 /// drop(foo2); // Prints "dropped!"
2370 /// ```
2371 #[inline]
2372 fn drop(&mut self) {
2373 // Because `fetch_sub` is already atomic, we do not need to synchronize
2374 // with other threads unless we are going to delete the object. This
2375 // same logic applies to the below `fetch_sub` to the `weak` count.
2376 if self.inner().strong.fetch_sub(1, Release) != 1 {
2377 return;
2378 }
2379
2380 // This fence is needed to prevent reordering of use of the data and
2381 // deletion of the data. Because it is marked `Release`, the decreasing
2382 // of the reference count synchronizes with this `Acquire` fence. This
2383 // means that use of the data happens before decreasing the reference
2384 // count, which happens before this fence, which happens before the
2385 // deletion of the data.
2386 //
2387 // As explained in the [Boost documentation][1],
2388 //
2389 // > It is important to enforce any possible access to the object in one
2390 // > thread (through an existing reference) to *happen before* deleting
2391 // > the object in a different thread. This is achieved by a "release"
2392 // > operation after dropping a reference (any access to the object
2393 // > through this reference must obviously happened before), and an
2394 // > "acquire" operation before deleting the object.
2395 //
2396 // In particular, while the contents of an Arc are usually immutable, it's
2397 // possible to have interior writes to something like a Mutex<T>. Since a
2398 // Mutex is not acquired when it is deleted, we can't rely on its
2399 // synchronization logic to make writes in thread A visible to a destructor
2400 // running in thread B.
2401 //
2402 // Also note that the Acquire fence here could probably be replaced with an
2403 // Acquire load, which could improve performance in highly-contended
2404 // situations. See [2].
2405 //
2406 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
2407 // [2]: (https://github.com/rust-lang/rust/pull/41714)
2408 acquire!(self.inner().strong);
2409
2410 unsafe {
2411 self.drop_slow();
2412 }
2413 }
2414}
2415
2416impl<A: Allocator> Arc<dyn Any + Send + Sync, A> {
2417 /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
2418 ///
2419 /// # Examples
2420 ///
2421 /// ```
2422 /// use std::any::Any;
2423 /// use std::sync::Arc;
2424 ///
2425 /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
2426 /// if let Ok(string) = value.downcast::<String>() {
2427 /// println!("String ({}): {}", string.len(), string);
2428 /// }
2429 /// }
2430 ///
2431 /// let my_string = "Hello World".to_string();
2432 /// print_if_string(Arc::new(my_string));
2433 /// print_if_string(Arc::new(0i8));
2434 /// ```
2435 #[inline]
2436 #[stable(feature = "rc_downcast", since = "1.29.0")]
2437 pub fn downcast<T>(self) -> Result<Arc<T, A>, Self>
2438 where
2439 T: Any + Send + Sync,
2440 {
2441 if (*self).is::<T>() {
2442 unsafe {
2443 let (ptr, alloc) = self.internal_into_inner_with_allocator();
2444 Ok(Arc::from_inner_in(ptr.cast(), alloc))
2445 }
2446 } else {
2447 Err(self)
2448 }
2449 }
2450
2451 /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
2452 ///
2453 /// For a safe alternative see [`downcast`].
2454 ///
2455 /// # Examples
2456 ///
2457 /// ```
2458 /// #![feature(downcast_unchecked)]
2459 ///
2460 /// use std::any::Any;
2461 /// use std::sync::Arc;
2462 ///
2463 /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
2464 ///
2465 /// unsafe {
2466 /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
2467 /// }
2468 /// ```
2469 ///
2470 /// # Safety
2471 ///
2472 /// The contained value must be of type `T`. Calling this method
2473 /// with the incorrect type is *undefined behavior*.
2474    ///
2476 /// [`downcast`]: Self::downcast
2477 #[inline]
2478 #[unstable(feature = "downcast_unchecked", issue = "90850")]
2479 pub unsafe fn downcast_unchecked<T>(self) -> Arc<T, A>
2480 where
2481 T: Any + Send + Sync,
2482 {
2483 unsafe {
2484 let (ptr, alloc) = self.internal_into_inner_with_allocator();
2485 Arc::from_inner_in(ptr.cast(), alloc)
2486 }
2487 }
2488}
2489
2490impl<T> Weak<T> {
2491 /// Constructs a new `Weak<T>`, without allocating any memory.
2492 /// Calling [`upgrade`] on the return value always gives [`None`].
2493 ///
2494 /// [`upgrade`]: Weak::upgrade
2495 ///
2496 /// # Examples
2497 ///
2498 /// ```
2499 /// use std::sync::Weak;
2500 ///
2501 /// let empty: Weak<i64> = Weak::new();
2502 /// assert!(empty.upgrade().is_none());
2503 /// ```
2504 #[inline]
2505 #[stable(feature = "downgraded_weak", since = "1.10.0")]
2506 #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")]
2507 #[must_use]
2508 pub const fn new() -> Weak<T> {
2509 Weak {
2510 ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) },
2511 alloc: Global,
2512 }
2513 }
2514}
2515
2516impl<T, A: Allocator> Weak<T, A> {
2517 /// Constructs a new `Weak<T, A>`, without allocating any memory, technically in the provided
2518 /// allocator.
2519 /// Calling [`upgrade`] on the return value always gives [`None`].
2520 ///
2521 /// [`upgrade`]: Weak::upgrade
2522 ///
2523 /// # Examples
2524 ///
2525 /// ```
2526 /// #![feature(allocator_api)]
2527 ///
2528 /// use std::sync::Weak;
2529 /// use std::alloc::System;
2530 ///
2531 /// let empty: Weak<i64, _> = Weak::new_in(System);
2532 /// assert!(empty.upgrade().is_none());
2533 /// ```
2534 #[inline]
2535 #[unstable(feature = "allocator_api", issue = "32838")]
2536 pub fn new_in(alloc: A) -> Weak<T, A> {
2537 Weak {
2538 ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) },
2539 alloc,
2540 }
2541 }
2542}
2543
2544/// Helper type to allow accessing the reference counts without
2545/// making any assertions about the data field.
2546struct WeakInner<'a> {
2547 weak: &'a atomic::AtomicUsize,
2548 strong: &'a atomic::AtomicUsize,
2549}
2550
2551impl<T: ?Sized> Weak<T> {
2552 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
2553 ///
2554 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2555 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2556 ///
2557 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2558 /// as these don't own anything; the method still works on them).
2559 ///
2560 /// # Safety
2561 ///
2562 /// The pointer must have originated from the [`into_raw`] and must still own its potential
2563 /// weak reference.
2564 ///
2565 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2566 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2567 /// count is not modified by this operation) and therefore it must be paired with a previous
2568 /// call to [`into_raw`].
2569 /// # Examples
2570 ///
2571 /// ```
2572 /// use std::sync::{Arc, Weak};
2573 ///
2574 /// let strong = Arc::new("hello".to_owned());
2575 ///
2576 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2577 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2578 ///
2579 /// assert_eq!(2, Arc::weak_count(&strong));
2580 ///
2581 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2582 /// assert_eq!(1, Arc::weak_count(&strong));
2583 ///
2584 /// drop(strong);
2585 ///
2586 /// // Decrement the last weak count.
2587 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2588 /// ```
2589 ///
2590 /// [`new`]: Weak::new
2591 /// [`into_raw`]: Weak::into_raw
2592 /// [`upgrade`]: Weak::upgrade
2593 #[inline]
2594 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2595 pub unsafe fn from_raw(ptr: *const T) -> Self {
2596 unsafe { Weak::from_raw_in(ptr, Global) }
2597 }
2598}
2599
2600impl<T: ?Sized, A: Allocator> Weak<T, A> {
2601 /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
2602 ///
2603 /// The pointer is valid only if there are some strong references. The pointer may be dangling,
2604 /// unaligned or even [`null`] otherwise.
2605 ///
2606 /// # Examples
2607 ///
2608 /// ```
2609 /// use std::sync::Arc;
2610 /// use std::ptr;
2611 ///
2612 /// let strong = Arc::new("hello".to_owned());
2613 /// let weak = Arc::downgrade(&strong);
2614 /// // Both point to the same object
2615 /// assert!(ptr::eq(&*strong, weak.as_ptr()));
2616 /// // The strong here keeps it alive, so we can still access the object.
2617 /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
2618 ///
2619 /// drop(strong);
2620 /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
2621 /// // undefined behaviour.
2622 /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
2623 /// ```
2624 ///
2625 /// [`null`]: core::ptr::null "ptr::null"
2626 #[must_use]
2627 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2628 pub fn as_ptr(&self) -> *const T {
2629 let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
2630
2631 if is_dangling(ptr) {
2632 // If the pointer is dangling, we return the sentinel directly. This cannot be
2633 // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
2634 ptr as *const T
2635 } else {
2636 // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
2637 // The payload may be dropped at this point, and we have to maintain provenance,
2638 // so use raw pointer manipulation.
2639 unsafe { ptr::addr_of_mut!((*ptr).data) }
2640 }
2641 }
2642
2643 /// Consumes the `Weak<T>` and turns it into a raw pointer.
2644 ///
2645 /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
2646 /// one weak reference (the weak count is not modified by this operation). It can be turned
2647 /// back into the `Weak<T>` with [`from_raw`].
2648 ///
2649 /// The same restrictions of accessing the target of the pointer as with
2650 /// [`as_ptr`] apply.
2651 ///
2652 /// # Examples
2653 ///
2654 /// ```
2655 /// use std::sync::{Arc, Weak};
2656 ///
2657 /// let strong = Arc::new("hello".to_owned());
2658 /// let weak = Arc::downgrade(&strong);
2659 /// let raw = weak.into_raw();
2660 ///
2661 /// assert_eq!(1, Arc::weak_count(&strong));
2662 /// assert_eq!("hello", unsafe { &*raw });
2663 ///
2664 /// drop(unsafe { Weak::from_raw(raw) });
2665 /// assert_eq!(0, Arc::weak_count(&strong));
2666 /// ```
2667 ///
2668 /// [`from_raw`]: Weak::from_raw
2669 /// [`as_ptr`]: Weak::as_ptr
2670 #[must_use = "`self` will be dropped if the result is not used"]
2671 #[stable(feature = "weak_into_raw", since = "1.45.0")]
2672 pub fn into_raw(self) -> *const T {
2673 let result = self.as_ptr();
2674 mem::forget(self);
2675 result
2676 }
2677
2678 /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>` in the provided
2679 /// allocator.
2680 ///
2681 /// This can be used to safely get a strong reference (by calling [`upgrade`]
2682 /// later) or to deallocate the weak count by dropping the `Weak<T>`.
2683 ///
2684 /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
2685 /// as these don't own anything; the method still works on them).
2686 ///
2687 /// # Safety
2688 ///
2689 /// The pointer must have originated from the [`into_raw`] and must still own its potential
2690 /// weak reference, and must point to a block of memory allocated by `alloc`.
2691 ///
2692 /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
2693 /// takes ownership of one weak reference currently represented as a raw pointer (the weak
2694 /// count is not modified by this operation) and therefore it must be paired with a previous
2695 /// call to [`into_raw`].
2696 /// # Examples
2697 ///
2698 /// ```
2699 /// use std::sync::{Arc, Weak};
2700 ///
2701 /// let strong = Arc::new("hello".to_owned());
2702 ///
2703 /// let raw_1 = Arc::downgrade(&strong).into_raw();
2704 /// let raw_2 = Arc::downgrade(&strong).into_raw();
2705 ///
2706 /// assert_eq!(2, Arc::weak_count(&strong));
2707 ///
2708 /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
2709 /// assert_eq!(1, Arc::weak_count(&strong));
2710 ///
2711 /// drop(strong);
2712 ///
2713 /// // Decrement the last weak count.
2714 /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
2715 /// ```
2716 ///
2717 /// [`new`]: Weak::new
2718 /// [`into_raw`]: Weak::into_raw
2719 /// [`upgrade`]: Weak::upgrade
2720 #[inline]
2721 #[unstable(feature = "allocator_api", issue = "32838")]
2722 pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self {
2723 // See Weak::as_ptr for context on how the input pointer is derived.
2724
2725 let ptr = if is_dangling(ptr) {
2726 // This is a dangling Weak.
2727 ptr as *mut ArcInner<T>
2728 } else {
2729 // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
2730 // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
2731 let offset = unsafe { data_offset(ptr) };
2732            // Thus, we reverse the offset to get the whole ArcInner.
2733 // SAFETY: the pointer originated from a Weak, so this offset is safe.
2734 unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
2735 };
2736
2737 // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
2738 Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc }
2739 }
2740}
2741
2742impl<T: ?Sized, A: Allocator> Weak<T, A> {
2743 /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
2744 /// dropping of the inner value if successful.
2745 ///
2746 /// Returns [`None`] if the inner value has since been dropped.
2747 ///
2748 /// # Examples
2749 ///
2750 /// ```
2751 /// use std::sync::Arc;
2752 ///
2753 /// let five = Arc::new(5);
2754 ///
2755 /// let weak_five = Arc::downgrade(&five);
2756 ///
2757 /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
2758 /// assert!(strong_five.is_some());
2759 ///
2760 /// // Destroy all strong pointers.
2761 /// drop(strong_five);
2762 /// drop(five);
2763 ///
2764 /// assert!(weak_five.upgrade().is_none());
2765 /// ```
2766 #[must_use = "this returns a new `Arc`, \
2767 without modifying the original weak pointer"]
2768 #[stable(feature = "arc_weak", since = "1.4.0")]
2769 pub fn upgrade(&self) -> Option<Arc<T, A>>
2770 where
2771 A: Clone,
2772 {
2773 #[inline]
2774 fn checked_increment(n: usize) -> Option<usize> {
2775            // Any write of 0 we can observe leaves the field in a permanently zero state.
2776 if n == 0 {
2777 return None;
2778 }
2779 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
2780 assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
2781 Some(n + 1)
2782 }
2783
2784 // We use a CAS loop to increment the strong count instead of a
2785 // fetch_add as this function should never take the reference count
2786 // from zero to one.
2787 //
2788 // Relaxed is fine for the failure case because we don't have any expectations about the new state.
2789 // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
2790 // value can be initialized after `Weak` references have already been created. In that case, we
2791 // expect to observe the fully initialized value.
2792 if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
2793 // SAFETY: pointer is not null, verified in checked_increment
2794 unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) }
2795 } else {
2796 None
2797 }
2798 }
2799
2800 /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
2801 ///
2802 /// If `self` was created using [`Weak::new`], this will return 0.
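    ///
    /// # Examples
    ///
    /// A small single-threaded sketch; once the `Arc` is shared across threads the
    /// observed count is only a snapshot.
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// // `five` is the only strong pointer.
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// drop(five);
    /// // The inner value has been dropped, so no strong pointers remain.
    /// assert_eq!(0, weak_five.strong_count());
    /// ```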
2803 #[must_use]
2804 #[stable(feature = "weak_counts", since = "1.41.0")]
2805 pub fn strong_count(&self) -> usize {
2806 if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
2807 }
2808
2809 /// Gets an approximation of the number of `Weak` pointers pointing to this
2810 /// allocation.
2811 ///
2812 /// If `self` was created using [`Weak::new`], or if there are no remaining
2813 /// strong pointers, this will return 0.
2814 ///
2815 /// # Accuracy
2816 ///
2817 /// Due to implementation details, the returned value can be off by 1 in
2818 /// either direction when other threads are manipulating any `Arc`s or
2819 /// `Weak`s pointing to the same allocation.
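    ///
    /// # Examples
    ///
    /// A single-threaded sketch; as noted above, the value is only approximate
    /// under concurrent modification.
    ///
    /// ```
    /// use std::sync::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    ///
    /// // One `Weak` besides the implicit weak reference held by the strong pointers.
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// drop(five);
    /// // No strong pointers remain, so this reports 0.
    /// assert_eq!(0, weak_five.weak_count());
    /// ```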
2820 #[must_use]
2821 #[stable(feature = "weak_counts", since = "1.41.0")]
2822 pub fn weak_count(&self) -> usize {
2823 if let Some(inner) = self.inner() {
2824 let weak = inner.weak.load(Acquire);
2825 let strong = inner.strong.load(Relaxed);
2826 if strong == 0 {
2827 0
2828 } else {
2829 // Since we observed that there was at least one strong pointer
2830 // after reading the weak count, we know that the implicit weak
2831 // reference (present whenever any strong references are alive)
2832 // was still around when we observed the weak count, and can
2833 // therefore safely subtract it.
2834 weak - 1
2835 }
2836 } else {
2837 0
2838 }
2839 }
2840
2841    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
2842 /// (i.e., when this `Weak` was created by `Weak::new`).
2843 #[inline]
2844 fn inner(&self) -> Option<WeakInner<'_>> {
2845 let ptr = self.ptr.as_ptr();
2846 if is_dangling(ptr) {
2847 None
2848 } else {
2849 // We are careful to *not* create a reference covering the "data" field, as
2850 // the field may be mutated concurrently (for example, if the last `Arc`
2851 // is dropped, the data field will be dropped in-place).
2852 Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
2853 }
2854 }
2855
2856 /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
2857 /// both don't point to any allocation (because they were created with `Weak::new()`). However,
2858 /// this function ignores the metadata of `dyn Trait` pointers.
2859 ///
2860 /// # Notes
2861 ///
2862    /// Since this compares pointers it means that two `Weak`s created with `Weak::new()`
2863    /// will equal each other, even though they don't point to any allocation.
2864 ///
2865 /// # Examples
2866 ///
2867 /// ```
2868 /// use std::sync::Arc;
2869 ///
2870 /// let first_rc = Arc::new(5);
2871 /// let first = Arc::downgrade(&first_rc);
2872 /// let second = Arc::downgrade(&first_rc);
2873 ///
2874 /// assert!(first.ptr_eq(&second));
2875 ///
2876 /// let third_rc = Arc::new(5);
2877 /// let third = Arc::downgrade(&third_rc);
2878 ///
2879 /// assert!(!first.ptr_eq(&third));
2880 /// ```
2881 ///
2882 /// Comparing `Weak::new`.
2883 ///
2884 /// ```
2885 /// use std::sync::{Arc, Weak};
2886 ///
2887 /// let first = Weak::new();
2888 /// let second = Weak::new();
2889 /// assert!(first.ptr_eq(&second));
2890 ///
2891 /// let third_rc = Arc::new(());
2892 /// let third = Arc::downgrade(&third_rc);
2893 /// assert!(!first.ptr_eq(&third));
2894 /// ```
2895 ///
2896 /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2897 #[inline]
2898 #[must_use]
2899 #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
2900 pub fn ptr_eq(&self, other: &Self) -> bool {
2901 ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr())
2902 }
2903}
2904
2905#[stable(feature = "arc_weak", since = "1.4.0")]
2906impl<T: ?Sized, A: Allocator + Clone> Clone for Weak<T, A> {
2907 /// Makes a clone of the `Weak` pointer that points to the same allocation.
2908 ///
2909 /// # Examples
2910 ///
2911 /// ```
2912 /// use std::sync::{Arc, Weak};
2913 ///
2914 /// let weak_five = Arc::downgrade(&Arc::new(5));
2915 ///
2916 /// let _ = Weak::clone(&weak_five);
2917 /// ```
2918 #[inline]
2919 fn clone(&self) -> Weak<T, A> {
2920 if let Some(inner) = self.inner() {
2921 // See comments in Arc::clone() for why this is relaxed. This can use a
2922 // fetch_add (ignoring the lock) because the weak count is only locked
2923            // when there are *no other* weak pointers in existence. (So we can't be
2924            // running this code in that case.)
2925 let old_size = inner.weak.fetch_add(1, Relaxed);
2926
2927 // See comments in Arc::clone() for why we do this (for mem::forget).
2928 if old_size > MAX_REFCOUNT {
2929 abort();
2930 }
2931 }
2932
2933 Weak { ptr: self.ptr, alloc: self.alloc.clone() }
2934 }
2935}
2936
2937#[stable(feature = "downgraded_weak", since = "1.10.0")]
2938impl<T> Default for Weak<T> {
2939 /// Constructs a new `Weak<T>`, without allocating memory.
2940 /// Calling [`upgrade`] on the return value always
2941 /// gives [`None`].
2942 ///
2943 /// [`upgrade`]: Weak::upgrade
2944 ///
2945 /// # Examples
2946 ///
2947 /// ```
2948 /// use std::sync::Weak;
2949 ///
2950 /// let empty: Weak<i64> = Default::default();
2951 /// assert!(empty.upgrade().is_none());
2952 /// ```
2953 fn default() -> Weak<T> {
2954 Weak::new()
2955 }
2956}
2957
2958#[stable(feature = "arc_weak", since = "1.4.0")]
2959unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak<T, A> {
2960 /// Drops the `Weak` pointer.
2961 ///
2962 /// # Examples
2963 ///
2964 /// ```
2965 /// use std::sync::{Arc, Weak};
2966 ///
2967 /// struct Foo;
2968 ///
2969 /// impl Drop for Foo {
2970 /// fn drop(&mut self) {
2971 /// println!("dropped!");
2972 /// }
2973 /// }
2974 ///
2975 /// let foo = Arc::new(Foo);
2976 /// let weak_foo = Arc::downgrade(&foo);
2977 /// let other_weak_foo = Weak::clone(&weak_foo);
2978 ///
2979 /// drop(weak_foo); // Doesn't print anything
2980 /// drop(foo); // Prints "dropped!"
2981 ///
2982 /// assert!(other_weak_foo.upgrade().is_none());
2983 /// ```
2984 fn drop(&mut self) {
2985        // If we find out that we were the last weak pointer, then it's time to
2986        // deallocate the data entirely. See the discussion in Arc::drop() about
2987        // the memory orderings.
2988 //
2989 // It's not necessary to check for the locked state here, because the
2990 // weak count can only be locked if there was precisely one weak ref,
2991 // meaning that drop could only subsequently run ON that remaining weak
2992 // ref, which can only happen after the lock is released.
2993 let inner = if let Some(inner) = self.inner() { inner } else { return };
2994
2995 if inner.weak.fetch_sub(1, Release) == 1 {
2996 acquire!(inner.weak);
2997 unsafe {
2998 self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()))
2999 }
3000 }
3001 }
3002}
3003
3004#[stable(feature = "rust1", since = "1.0.0")]
3005trait ArcEqIdent<T: ?Sized + PartialEq, A: Allocator> {
3006 fn eq(&self, other: &Arc<T, A>) -> bool;
3007 fn ne(&self, other: &Arc<T, A>) -> bool;
3008}
3009
3010#[stable(feature = "rust1", since = "1.0.0")]
3011impl<T: ?Sized + PartialEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3012 #[inline]
3013 default fn eq(&self, other: &Arc<T, A>) -> bool {
3014 **self == **other
3015 }
3016 #[inline]
3017 default fn ne(&self, other: &Arc<T, A>) -> bool {
3018 **self != **other
3019 }
3020}
3021
3022/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
3023/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
3024/// store large values that are slow to clone, but also heavy to check for equality, causing this
3025/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
3026/// the same value than two `&T`s.
3027///
3028/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
3029#[stable(feature = "rust1", since = "1.0.0")]
3030impl<T: ?Sized + crate::rc::MarkerEq, A: Allocator> ArcEqIdent<T, A> for Arc<T, A> {
3031 #[inline]
3032 fn eq(&self, other: &Arc<T, A>) -> bool {
3033 Arc::ptr_eq(self, other) || **self == **other
3034 }
3035
3036 #[inline]
3037 fn ne(&self, other: &Arc<T, A>) -> bool {
3038 !Arc::ptr_eq(self, other) && **self != **other
3039 }
3040}
3041
3042#[stable(feature = "rust1", since = "1.0.0")]
3043impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Arc<T, A> {
3044 /// Equality for two `Arc`s.
3045 ///
3046 /// Two `Arc`s are equal if their inner values are equal, even if they are
3047    /// stored in different allocations.
3048 ///
3049 /// If `T` also implements `Eq` (implying reflexivity of equality),
3050 /// two `Arc`s that point to the same allocation are always equal.
3051 ///
3052 /// # Examples
3053 ///
3054 /// ```
3055 /// use std::sync::Arc;
3056 ///
3057 /// let five = Arc::new(5);
3058 ///
3059 /// assert!(five == Arc::new(5));
3060 /// ```
3061 #[inline]
3062 fn eq(&self, other: &Arc<T, A>) -> bool {
3063 ArcEqIdent::eq(self, other)
3064 }
3065
3066 /// Inequality for two `Arc`s.
3067 ///
3068 /// Two `Arc`s are not equal if their inner values are not equal.
3069 ///
3070 /// If `T` also implements `Eq` (implying reflexivity of equality),
3071    /// two `Arc`s that point to the same value are never unequal.
3072 ///
3073 /// # Examples
3074 ///
3075 /// ```
3076 /// use std::sync::Arc;
3077 ///
3078 /// let five = Arc::new(5);
3079 ///
3080 /// assert!(five != Arc::new(6));
3081 /// ```
3082 #[inline]
3083 fn ne(&self, other: &Arc<T, A>) -> bool {
3084 ArcEqIdent::ne(self, other)
3085 }
3086}
3087
3088#[stable(feature = "rust1", since = "1.0.0")]
3089impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Arc<T, A> {
3090 /// Partial comparison for two `Arc`s.
3091 ///
3092 /// The two are compared by calling `partial_cmp()` on their inner values.
3093 ///
3094 /// # Examples
3095 ///
3096 /// ```
3097 /// use std::sync::Arc;
3098 /// use std::cmp::Ordering;
3099 ///
3100 /// let five = Arc::new(5);
3101 ///
3102 /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
3103 /// ```
3104 fn partial_cmp(&self, other: &Arc<T, A>) -> Option<Ordering> {
3105 (**self).partial_cmp(&**other)
3106 }
3107
3108 /// Less-than comparison for two `Arc`s.
3109 ///
3110 /// The two are compared by calling `<` on their inner values.
3111 ///
3112 /// # Examples
3113 ///
3114 /// ```
3115 /// use std::sync::Arc;
3116 ///
3117 /// let five = Arc::new(5);
3118 ///
3119 /// assert!(five < Arc::new(6));
3120 /// ```
3121 fn lt(&self, other: &Arc<T, A>) -> bool {
3122 *(*self) < *(*other)
3123 }
3124
3125 /// 'Less than or equal to' comparison for two `Arc`s.
3126 ///
3127 /// The two are compared by calling `<=` on their inner values.
3128 ///
3129 /// # Examples
3130 ///
3131 /// ```
3132 /// use std::sync::Arc;
3133 ///
3134 /// let five = Arc::new(5);
3135 ///
3136 /// assert!(five <= Arc::new(5));
3137 /// ```
3138 fn le(&self, other: &Arc<T, A>) -> bool {
3139 *(*self) <= *(*other)
3140 }
3141
3142 /// Greater-than comparison for two `Arc`s.
3143 ///
3144 /// The two are compared by calling `>` on their inner values.
3145 ///
3146 /// # Examples
3147 ///
3148 /// ```
3149 /// use std::sync::Arc;
3150 ///
3151 /// let five = Arc::new(5);
3152 ///
3153 /// assert!(five > Arc::new(4));
3154 /// ```
3155 fn gt(&self, other: &Arc<T, A>) -> bool {
3156 *(*self) > *(*other)
3157 }
3158
3159 /// 'Greater than or equal to' comparison for two `Arc`s.
3160 ///
3161 /// The two are compared by calling `>=` on their inner values.
3162 ///
3163 /// # Examples
3164 ///
3165 /// ```
3166 /// use std::sync::Arc;
3167 ///
3168 /// let five = Arc::new(5);
3169 ///
3170 /// assert!(five >= Arc::new(5));
3171 /// ```
3172 fn ge(&self, other: &Arc<T, A>) -> bool {
3173 *(*self) >= *(*other)
3174 }
3175}
3176#[stable(feature = "rust1", since = "1.0.0")]
3177impl<T: ?Sized + Ord, A: Allocator> Ord for Arc<T, A> {
3178 /// Comparison for two `Arc`s.
3179 ///
3180 /// The two are compared by calling `cmp()` on their inner values.
3181 ///
3182 /// # Examples
3183 ///
3184 /// ```
3185 /// use std::sync::Arc;
3186 /// use std::cmp::Ordering;
3187 ///
3188 /// let five = Arc::new(5);
3189 ///
3190 /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
3191 /// ```
3192 fn cmp(&self, other: &Arc<T, A>) -> Ordering {
3193 (**self).cmp(&**other)
3194 }
3195}
3196#[stable(feature = "rust1", since = "1.0.0")]
3197impl<T: ?Sized + Eq, A: Allocator> Eq for Arc<T, A> {}
3198
3199#[stable(feature = "rust1", since = "1.0.0")]
3200impl<T: ?Sized + fmt::Display, A: Allocator> fmt::Display for Arc<T, A> {
3201 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3202 fmt::Display::fmt(&**self, f)
3203 }
3204}
3205
3206#[stable(feature = "rust1", since = "1.0.0")]
3207impl<T: ?Sized + fmt::Debug, A: Allocator> fmt::Debug for Arc<T, A> {
3208 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3209 fmt::Debug::fmt(&**self, f)
3210 }
3211}
3212
3213#[stable(feature = "rust1", since = "1.0.0")]
3214impl<T: ?Sized, A: Allocator> fmt::Pointer for Arc<T, A> {
3215 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3216 fmt::Pointer::fmt(&(&**self as *const T), f)
3217 }
3218}
3219
3220#[cfg(not(no_global_oom_handling))]
3221#[stable(feature = "rust1", since = "1.0.0")]
3222impl<T: Default> Default for Arc<T> {
3223 /// Creates a new `Arc<T>`, with the `Default` value for `T`.
3224 ///
3225 /// # Examples
3226 ///
3227 /// ```
3228 /// use std::sync::Arc;
3229 ///
3230 /// let x: Arc<i32> = Default::default();
3231 /// assert_eq!(*x, 0);
3232 /// ```
3233 fn default() -> Arc<T> {
3234        Arc::new(Default::default())
3235 }
3236}
3237
3238#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + Hash, A: Allocator> Hash for Arc<T, A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Arc<T> {
    /// Converts a `T` into an `Arc<T>`.
    ///
    /// The conversion moves the value into a
    /// newly allocated `Arc`. It is equivalent to
    /// calling `Arc::new(t)`.
    ///
    /// # Example
    /// ```rust
    /// # use std::sync::Arc;
    /// let x = 5;
    /// let arc = Arc::new(5);
    ///
    /// assert_eq!(Arc::from(x), arc);
    /// ```
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_array", since = "1.74.0")]
impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
    /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
    ///
    /// The conversion moves the array into a newly allocated `Arc`.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: [i32; 3] = [1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: [T; N]) -> Arc<[T]> {
        Arc::<[T; N]>::from(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: Clone> From<&[T]> for Arc<[T]> {
    /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let original: &[i32] = &[1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(original);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: &[T]) -> Arc<[T]> {
        <Self as ArcFromSlice<T>>::from_slice(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<&str> for Arc<str> {
    /// Allocate a reference-counted `str` and copy `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let shared: Arc<str> = Arc::from("eggplant");
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: &str) -> Arc<str> {
        let arc = Arc::<[u8]>::from(v.as_bytes());
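        // SAFETY: `str` has the same layout as `[u8]`, and the bytes came from
        // a valid `&str`, so the cast pointer still refers to valid UTF-8.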
        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl From<String> for Arc<str> {
    /// Allocate a reference-counted `str` and copy `v` into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: String = "eggplant".to_owned();
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: String) -> Arc<str> {
        Arc::from(&v[..])
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Arc<T, A> {
    /// Move a boxed object to a new, reference-counted allocation.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Box<str> = Box::from("eggplant");
    /// let shared: Arc<str> = Arc::from(unique);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(v: Box<T, A>) -> Arc<T, A> {
        Arc::from_box_in(v)
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_slice", since = "1.21.0")]
impl<T, A: Allocator + Clone> From<Vec<T, A>> for Arc<[T], A> {
    /// Allocate a reference-counted slice and move `v`'s items into it.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let unique: Vec<i32> = vec![1, 2, 3];
    /// let shared: Arc<[i32]> = Arc::from(unique);
    /// assert_eq!(&[1, 2, 3], &shared[..]);
    /// ```
    #[inline]
    fn from(v: Vec<T, A>) -> Arc<[T], A> {
        unsafe {
            let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();

            let rc_ptr = Self::allocate_for_slice_in(len, &alloc);
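            // Move the elements out of the `Vec`'s buffer into the new
            // reference-counted allocation; the source buffer is then freed
            // below without dropping the moved-out values.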
            ptr::copy_nonoverlapping(vec_ptr, ptr::addr_of_mut!((*rc_ptr).data) as *mut T, len);

            // Create a `Vec<T, &A>` with length 0, to deallocate the buffer
            // without dropping its contents or the allocator
            let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);

            Self::from_ptr_in(rc_ptr, alloc)
        }
    }
}

#[stable(feature = "shared_from_cow", since = "1.45.0")]
impl<'a, B> From<Cow<'a, B>> for Arc<B>
where
    B: ToOwned + ?Sized,
    Arc<B>: From<&'a B> + From<B::Owned>,
{
    /// Create an atomically reference-counted pointer from
    /// a clone-on-write pointer by copying its content.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// # use std::borrow::Cow;
    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
    /// let shared: Arc<str> = Arc::from(cow);
    /// assert_eq!("eggplant", &shared[..]);
    /// ```
    #[inline]
    fn from(cow: Cow<'a, B>) -> Arc<B> {
        match cow {
            Cow::Borrowed(s) => Arc::from(s),
            Cow::Owned(s) => Arc::from(s),
        }
    }
}

#[stable(feature = "shared_from_str", since = "1.62.0")]
impl From<Arc<str>> for Arc<[u8]> {
    /// Converts an atomically reference-counted string slice into a byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let string: Arc<str> = Arc::from("eggplant");
    /// let bytes: Arc<[u8]> = Arc::from(string);
    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
    /// ```
    #[inline]
    fn from(rc: Arc<str>) -> Self {
        // SAFETY: `str` has the same layout as `[u8]`.
        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
    }
}

#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, A: Allocator, const N: usize> TryFrom<Arc<[T], A>> for Arc<[T; N], A> {
    type Error = Arc<[T], A>;

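    /// Tries to convert an `Arc` of a slice into an `Arc` of an array.
    ///
    /// The conversion succeeds only if the slice's length is exactly `N`;
    /// otherwise the original `Arc<[T], A>` is handed back as the error.
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// let slice: Arc<[u32]> = Arc::from([1, 2, 3]);
    /// let array = Arc::<[u32; 3]>::try_from(slice).unwrap();
    /// assert_eq!(*array, [1, 2, 3]);
    ///
    /// // A length mismatch returns the original `Arc<[T]>` as the error.
    /// let short: Arc<[u32]> = Arc::from([1, 2]);
    /// assert!(Arc::<[u32; 3]>::try_from(short).is_err());
    /// ```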
    fn try_from(boxed_slice: Arc<[T], A>) -> Result<Self, Self::Error> {
        if boxed_slice.len() == N {
            let (ptr, alloc) = boxed_slice.internal_into_inner_with_allocator();
            Ok(unsafe { Arc::from_inner_in(ptr.cast(), alloc) })
        } else {
            Err(boxed_slice)
        }
    }
}

#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> FromIterator<T> for Arc<[T]> {
    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
    ///
    /// # Performance characteristics
    ///
    /// ## The general case
    ///
    /// In the general case, collecting into `Arc<[T]>` is done by first
    /// collecting into a `Vec<T>`. That is, when writing the following:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// this behaves as if we wrote:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
    /// ```
    ///
    /// This will allocate as many times as needed for constructing the `Vec<T>`
    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
    ///
    /// ## Iterators of known length
    ///
    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
    /// a single allocation will be made for the `Arc<[T]>`. For example:
    ///
    /// ```rust
    /// # use std::sync::Arc;
    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
    /// ```
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        ToArcSlice::to_arc_slice(iter.into_iter())
    }
}

#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
    fn to_arc_slice(self) -> Arc<[T]>;
}

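// The blanket impl below reaches `Arc<[T]>` by first collecting into a
// `Vec<T>`; the `TrustedLen` impl that follows specializes this to a single
// allocation when the exact length is known up front.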
#[cfg(not(no_global_oom_handling))]
impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
    default fn to_arc_slice(self) -> Arc<[T]> {
        self.collect::<Vec<T>>().into()
    }
}

#[cfg(not(no_global_oom_handling))]
impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
    fn to_arc_slice(self) -> Arc<[T]> {
        // This is the case for a `TrustedLen` iterator.
        let (low, high) = self.size_hint();
        if let Some(high) = high {
            debug_assert_eq!(
                low,
                high,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );

            unsafe {
                // SAFETY: `TrustedLen` guarantees the size hint is exact,
                // so `low` is the iterator's exact length.
                Arc::from_iter_exact(self, low)
            }
        } else {
            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
            // length exceeding `usize::MAX`.
            // The default implementation would collect into a vec which would panic.
            // Thus we panic here immediately without invoking `Vec` code.
            panic!("capacity overflow");
        }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Arc<T, A> {
    fn borrow(&self) -> &T {
        &**self
    }
}

#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
impl<T: ?Sized, A: Allocator> AsRef<T> for Arc<T, A> {
    fn as_ref(&self) -> &T {
        &**self
    }
}

#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Arc<T, A> {}

/// Get the offset within an `ArcInner` for the payload behind a pointer.
///
/// # Safety
///
/// The pointer must point to (and have valid metadata for) a previously
/// valid instance of T, but the T is allowed to be dropped.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
    // Align the unsized value to the end of the ArcInner.
    // Because ArcInner is repr(C), the data field will always be last in memory.
    // SAFETY: since the only unsized types possible are slices, trait objects,
    // and extern types, the input safety requirement is currently enough to
    // satisfy the requirements of align_of_val_raw; this is an implementation
    // detail of the language that must not be relied upon outside of std.
    unsafe { data_offset_align(align_of_val_raw(ptr)) }
}

#[inline]
fn data_offset_align(align: usize) -> usize {
    let layout = Layout::new::<ArcInner<()>>();
    layout.size() + layout.padding_needed_for(align)
}
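// As a worked example (assuming a typical 64-bit target, where `ArcInner<()>`
// is just the two `AtomicUsize` counters: 16 bytes with alignment 8): a payload
// with alignment 8 needs no extra padding and starts at offset 16, while a
// payload with alignment 32 is padded out to start at offset 32.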

#[stable(feature = "arc_error", since = "1.52.0")]
impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
    #[allow(deprecated, deprecated_in_future)]
    fn description(&self) -> &str {
        core::error::Error::description(&**self)
    }

    #[allow(deprecated)]
    fn cause(&self) -> Option<&dyn core::error::Error> {
        core::error::Error::cause(&**self)
    }

    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        core::error::Error::source(&**self)
    }

    fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) {
        core::error::Error::provide(&**self, req);
    }
}