//! Vendored and stripped-down version of triomphe.
2 | use std::{ |
3 | alloc::{self, Layout}, |
4 | cmp::Ordering, |
5 | hash::{Hash, Hasher}, |
6 | marker::PhantomData, |
7 | mem::{self, offset_of, ManuallyDrop}, |
8 | ops::Deref, |
9 | ptr, |
10 | sync::atomic::{ |
11 | self, |
12 | Ordering::{Acquire, Relaxed, Release}, |
13 | }, |
14 | }; |
15 | |
/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
20 | const MAX_REFCOUNT: usize = (isize::MAX) as usize; |
21 | |
/// The object allocated by an `Arc<T>`.
#[repr(C)]
24 | pub(crate) struct ArcInner<T: ?Sized> { |
25 | pub(crate) count: atomic::AtomicUsize, |
26 | pub(crate) data: T, |
27 | } |
28 | |
29 | unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} |
30 | unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {} |
31 | |
32 | /// An atomically reference counted shared pointer |
33 | /// |
34 | /// See the documentation for [`Arc`] in the standard library. Unlike the |
35 | /// standard library `Arc`, this `Arc` does not support weak reference counting. |
36 | /// |
37 | /// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html |
#[repr(transparent)]
39 | pub(crate) struct Arc<T: ?Sized> { |
40 | pub(crate) p: ptr::NonNull<ArcInner<T>>, |
41 | pub(crate) phantom: PhantomData<T>, |
42 | } |
43 | |
44 | unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} |
45 | unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} |
46 | |
47 | impl<T> Arc<T> { |
    /// Reconstruct the `Arc<T>` from a raw pointer obtained from `into_raw()`.
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// It is recommended to use `OffsetArc` for this.
    #[inline]
55 | pub(crate) unsafe fn from_raw(ptr: *const T) -> Self { |
56 | // To find the corresponding pointer to the `ArcInner` we need |
57 | // to subtract the offset of the `data` field from the pointer. |
        let ptr: *const u8 = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
59 | Arc { p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>), phantom: PhantomData } |
60 | } |
61 | } |
62 | |
63 | impl<T: ?Sized> Arc<T> { |
    #[inline]
65 | fn inner(&self) -> &ArcInner<T> { |
66 | // This unsafety is ok because while this arc is alive we're guaranteed |
67 | // that the inner pointer is valid. Furthermore, we know that the |
68 | // `ArcInner` structure itself is `Sync` because the inner data is |
69 | // `Sync` as well, so we're ok loaning out an immutable pointer to these |
70 | // contents. |
71 | unsafe { &*self.ptr() } |
72 | } |
73 | |
74 | // Non-inlined part of `drop`. Just invokes the destructor. |
    #[inline(never)]
76 | unsafe fn drop_slow(&mut self) { |
77 | let _ = Box::from_raw(self.ptr()); |
78 | } |
79 | |
80 | /// Test pointer equality between the two Arcs, i.e. they must be the _same_ |
81 | /// allocation |
    #[inline]
83 | pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool { |
84 | std::ptr::addr_eq(this.ptr(), other.ptr()) |
85 | } |
86 | |
87 | pub(crate) fn ptr(&self) -> *mut ArcInner<T> { |
88 | self.p.as_ptr() |
89 | } |
90 | } |
91 | |
92 | impl<T: ?Sized> Clone for Arc<T> { |
    #[inline]
94 | fn clone(&self) -> Self { |
95 | // Using a relaxed ordering is alright here, as knowledge of the |
96 | // original reference prevents other threads from erroneously deleting |
97 | // the object. |
98 | // |
99 | // As explained in the [Boost documentation][1], Increasing the |
100 | // reference counter can always be done with memory_order_relaxed: New |
101 | // references to an object can only be formed from an existing |
102 | // reference, and passing an existing reference from one thread to |
103 | // another must already provide any required synchronization. |
104 | // |
105 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
106 | let old_size = self.inner().count.fetch_add(1, Relaxed); |
107 | |
108 | // However we need to guard against massive refcounts in case someone |
109 | // is `mem::forget`ing Arcs. If we don't do this the count can overflow |
        // and users will use-after-free. We racily saturate to `isize::MAX` on
111 | // the assumption that there aren't ~2 billion threads incrementing |
112 | // the reference count at once. This branch will never be taken in |
113 | // any realistic program. |
114 | // |
115 | // We abort because such a program is incredibly degenerate, and we |
116 | // don't care to support it. |
117 | if old_size > MAX_REFCOUNT { |
118 | std::process::abort(); |
119 | } |
120 | |
121 | unsafe { Arc { p: ptr::NonNull::new_unchecked(self.ptr()), phantom: PhantomData } } |
122 | } |
123 | } |
124 | |
125 | impl<T: ?Sized> Deref for Arc<T> { |
126 | type Target = T; |
127 | |
    #[inline]
129 | fn deref(&self) -> &T { |
130 | &self.inner().data |
131 | } |
132 | } |
133 | |
134 | impl<T: ?Sized> Arc<T> { |
135 | /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned. |
    #[inline]
137 | pub(crate) fn get_mut(this: &mut Self) -> Option<&mut T> { |
138 | if this.is_unique() { |
139 | unsafe { |
140 | // See make_mut() for documentation of the threadsafety here. |
141 | Some(&mut (*this.ptr()).data) |
142 | } |
143 | } else { |
144 | None |
145 | } |
146 | } |
147 | |
148 | /// Whether or not the `Arc` is uniquely owned (is the refcount 1?). |
149 | pub(crate) fn is_unique(&self) -> bool { |
150 | // See the extensive discussion in [1] for why this needs to be Acquire. |
151 | // |
152 | // [1] https://github.com/servo/servo/issues/21186 |
        self.inner().count.load(Acquire) == 1
154 | } |
155 | } |
156 | |
157 | impl<T: ?Sized> Drop for Arc<T> { |
    #[inline]
159 | fn drop(&mut self) { |
160 | // Because `fetch_sub` is already atomic, we do not need to synchronize |
161 | // with other threads unless we are going to delete the object. |
162 | if self.inner().count.fetch_sub(1, Release) != 1 { |
163 | return; |
164 | } |
165 | |
166 | // FIXME(bholley): Use the updated comment when [2] is merged. |
167 | // |
168 | // This load is needed to prevent reordering of use of the data and |
169 | // deletion of the data. Because it is marked `Release`, the decreasing |
170 | // of the reference count synchronizes with this `Acquire` load. This |
171 | // means that use of the data happens before decreasing the reference |
172 | // count, which happens before this load, which happens before the |
173 | // deletion of the data. |
174 | // |
175 | // As explained in the [Boost documentation][1], |
176 | // |
177 | // > It is important to enforce any possible access to the object in one |
178 | // > thread (through an existing reference) to *happen before* deleting |
179 | // > the object in a different thread. This is achieved by a "release" |
180 | // > operation after dropping a reference (any access to the object |
181 | // > through this reference must obviously happened before), and an |
182 | // > "acquire" operation before deleting the object. |
183 | // |
184 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
185 | // [2]: https://github.com/rust-lang/rust/pull/41714 |
186 | self.inner().count.load(Acquire); |
187 | |
188 | unsafe { |
189 | self.drop_slow(); |
190 | } |
191 | } |
192 | } |
193 | |
194 | impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { |
195 | fn eq(&self, other: &Arc<T>) -> bool { |
196 | Self::ptr_eq(self, other) || *(*self) == *(*other) |
197 | } |
198 | |
199 | fn ne(&self, other: &Arc<T>) -> bool { |
200 | !Self::ptr_eq(self, other) && *(*self) != *(*other) |
201 | } |
202 | } |
203 | |
204 | impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { |
205 | fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { |
206 | (**self).partial_cmp(&**other) |
207 | } |
208 | |
209 | fn lt(&self, other: &Arc<T>) -> bool { |
210 | *(*self) < *(*other) |
211 | } |
212 | |
213 | fn le(&self, other: &Arc<T>) -> bool { |
214 | *(*self) <= *(*other) |
215 | } |
216 | |
217 | fn gt(&self, other: &Arc<T>) -> bool { |
218 | *(*self) > *(*other) |
219 | } |
220 | |
221 | fn ge(&self, other: &Arc<T>) -> bool { |
222 | *(*self) >= *(*other) |
223 | } |
224 | } |
225 | |
226 | impl<T: ?Sized + Ord> Ord for Arc<T> { |
227 | fn cmp(&self, other: &Arc<T>) -> Ordering { |
228 | (**self).cmp(&**other) |
229 | } |
230 | } |
231 | |
232 | impl<T: ?Sized + Eq> Eq for Arc<T> {} |
233 | |
234 | impl<T: ?Sized + Hash> Hash for Arc<T> { |
235 | fn hash<H: Hasher>(&self, state: &mut H) { |
236 | (**self).hash(state) |
237 | } |
238 | } |
239 | |
#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
#[repr(C)]
242 | pub(crate) struct HeaderSlice<H, T: ?Sized> { |
243 | pub(crate) header: H, |
244 | length: usize, |
245 | slice: T, |
246 | } |
247 | |
248 | impl<H, T> HeaderSlice<H, [T]> { |
249 | pub(crate) fn slice(&self) -> &[T] { |
250 | &self.slice |
251 | } |
252 | } |
253 | |
254 | impl<H, T> Deref for HeaderSlice<H, [T; 0]> { |
255 | type Target = HeaderSlice<H, [T]>; |
256 | |
257 | fn deref(&self) -> &Self::Target { |
258 | let len: usize = self.length; |
259 | let fake_slice: *const [T] = ptr::slice_from_raw_parts(self as *const _ as *const T, len); |
260 | unsafe { &*(fake_slice as *const HeaderSlice<H, [T]>) } |
261 | } |
262 | } |
263 | |
264 | /// A "thin" `Arc` containing dynamically sized data |
265 | /// |
266 | /// This is functionally equivalent to `Arc<(H, [T])>` |
267 | /// |
268 | /// When you create an `Arc` containing a dynamically sized type |
269 | /// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack |
270 | /// as a "fat pointer", where the length of the slice is stored |
271 | /// alongside the `Arc`'s pointer. In some situations you may wish to |
272 | /// have a thin pointer instead, perhaps for FFI compatibility |
273 | /// or space efficiency. |
274 | /// |
275 | /// Note that we use `[T; 0]` in order to have the right alignment for `T`. |
276 | /// |
277 | /// `ThinArc` solves this by storing the length in the allocation itself, |
278 | /// via `HeaderSlice`. |
#[repr(transparent)]
280 | pub(crate) struct ThinArc<H, T> { |
281 | ptr: ptr::NonNull<ArcInner<HeaderSlice<H, [T; 0]>>>, |
282 | phantom: PhantomData<(H, T)>, |
283 | } |
284 | |
285 | unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} |
286 | unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} |
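
// A quick compile-time sketch of the "thin" property described above: the
// `ThinArc` handle itself is a single pointer wide, whereas the corresponding
// `Arc<HeaderSlice<H, [T]>>` handle is a fat pointer that also carries the
// slice length. The `u64`/`u8` parameters are arbitrary example types.
const _: () = assert!(mem::size_of::<ThinArc<u64, u8>>() == mem::size_of::<usize>());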
287 | |
288 | // Synthesize a fat pointer from a thin pointer. |
289 | fn thin_to_thick<H, T>( |
290 | thin: *mut ArcInner<HeaderSlice<H, [T; 0]>>, |
291 | ) -> *mut ArcInner<HeaderSlice<H, [T]>> { |
292 | let len: usize = unsafe { (*thin).data.length }; |
    let fake_slice: *mut [T] = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
294 | // Transplants metadata. |
295 | fake_slice as *mut ArcInner<HeaderSlice<H, [T]>> |
296 | } |
297 | |
298 | impl<H, T> ThinArc<H, T> { |
    /// Temporarily converts `self` into a bona fide `Arc` and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
302 | pub(crate) fn with_arc<F, U>(&self, f: F) -> U |
303 | where |
304 | F: FnOnce(&Arc<HeaderSlice<H, [T]>>) -> U, |
305 | { |
306 | // Synthesize transient Arc, which never touches the refcount of the ArcInner. |
307 | let transient = unsafe { |
308 | ManuallyDrop::new(Arc { |
309 | p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())), |
310 | phantom: PhantomData, |
311 | }) |
312 | }; |
313 | |
314 | // Expose the transient Arc to the callback, which may clone it if it wants. |
315 | let result = f(&transient); |
316 | |
317 | // Forward the result. |
318 | result |
319 | } |
320 | |
    /// Creates a `ThinArc` for a `HeaderSlice` using the given header struct and
    /// iterator to generate the slice.
323 | pub(crate) fn from_header_and_iter<I>(header: H, mut items: I) -> Self |
324 | where |
325 | I: Iterator<Item = T> + ExactSizeIterator, |
326 | { |
        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
328 | |
329 | let num_items = items.len(); |
330 | |
331 | // Offset of the start of the slice in the allocation. |
332 | let inner_to_data_offset = offset_of!(ArcInner<HeaderSlice<H, [T; 0]>>, data); |
333 | let data_to_slice_offset = offset_of!(HeaderSlice<H, [T; 0]>, slice); |
334 | let slice_offset = inner_to_data_offset + data_to_slice_offset; |
335 | |
336 | // Compute the size of the real payload. |
        let slice_size = mem::size_of::<T>().checked_mul(num_items).expect("size overflows");
        let usable_size = slice_offset.checked_add(slice_size).expect("size overflows");
339 | |
340 | // Round up size to alignment. |
341 | let align = mem::align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>(); |
342 | let size = usable_size.wrapping_add(align - 1) & !(align - 1); |
        assert!(size >= usable_size, "size overflows");
        let layout = Layout::from_size_align(size, align).expect("invalid layout");
345 | |
346 | let ptr: *mut ArcInner<HeaderSlice<H, [T; 0]>>; |
347 | unsafe { |
348 | let buffer = alloc::alloc(layout); |
349 | |
350 | if buffer.is_null() { |
351 | alloc::handle_alloc_error(layout); |
352 | } |
353 | |
354 | // // Synthesize the fat pointer. We do this by claiming we have a direct |
355 | // // pointer to a [T], and then changing the type of the borrow. The key |
356 | // // point here is that the length portion of the fat pointer applies |
357 | // // only to the number of elements in the dynamically-sized portion of |
358 | // // the type, so the value will be the same whether it points to a [T] |
359 | // // or something else with a [T] as its last member. |
360 | // let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); |
361 | // ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; |
362 | ptr = buffer as *mut _; |
363 | |
364 | let count = atomic::AtomicUsize::new(1); |
365 | |
366 | // Write the data. |
367 | // |
368 | // Note that any panics here (i.e. from the iterator) are safe, since |
369 | // we'll just leak the uninitialized memory. |
370 | ptr::write(ptr::addr_of_mut!((*ptr).count), count); |
371 | ptr::write(ptr::addr_of_mut!((*ptr).data.header), header); |
372 | ptr::write(ptr::addr_of_mut!((*ptr).data.length), num_items); |
373 | if num_items != 0 { |
374 | let mut current = ptr::addr_of_mut!((*ptr).data.slice) as *mut T; |
375 | debug_assert_eq!(current as usize - buffer as usize, slice_offset); |
376 | for _ in 0..num_items { |
377 | ptr::write( |
378 | current, |
                        items.next().expect("ExactSizeIterator over-reported length"),
380 | ); |
381 | current = current.offset(1); |
382 | } |
                assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
384 | |
385 | // We should have consumed the buffer exactly. |
386 | debug_assert_eq!(current as *mut u8, buffer.add(usable_size)); |
387 | } |
            assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
389 | } |
390 | |
391 | ThinArc { ptr: unsafe { ptr::NonNull::new_unchecked(ptr) }, phantom: PhantomData } |
392 | } |
393 | } |
394 | |
395 | impl<H, T> Deref for ThinArc<H, T> { |
396 | type Target = HeaderSlice<H, [T]>; |
397 | |
    #[inline]
399 | fn deref(&self) -> &Self::Target { |
400 | unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data } |
401 | } |
402 | } |
403 | |
404 | impl<H, T> Clone for ThinArc<H, T> { |
    #[inline]
406 | fn clone(&self) -> Self { |
        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
408 | } |
409 | } |
410 | |
411 | impl<H, T> Drop for ThinArc<H, T> { |
    #[inline]
413 | fn drop(&mut self) { |
414 | let _ = Arc::from_thin(ThinArc { ptr: self.ptr, phantom: PhantomData }); |
415 | } |
416 | } |
417 | |
418 | impl<H, T> Arc<HeaderSlice<H, [T]>> { |
419 | /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount |
420 | /// is not modified. |
    #[inline]
    pub(crate) fn into_thin(a: Self) -> ThinArc<H, T> {
        assert_eq!(a.length, a.slice.len(), "Length needs to be correct for ThinArc to work");
424 | let fat_ptr: *mut ArcInner<HeaderSlice<H, [T]>> = a.ptr(); |
425 | mem::forget(a); |
426 | let thin_ptr = fat_ptr as *mut [usize] as *mut usize; |
427 | ThinArc { |
428 | ptr: unsafe { |
429 | ptr::NonNull::new_unchecked(thin_ptr as *mut ArcInner<HeaderSlice<H, [T; 0]>>) |
430 | }, |
431 | phantom: PhantomData, |
432 | } |
433 | } |
434 | |
435 | /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount |
436 | /// is not modified. |
    #[inline]
438 | pub(crate) fn from_thin(a: ThinArc<H, T>) -> Self { |
439 | let ptr = thin_to_thick(a.ptr.as_ptr()); |
440 | mem::forget(a); |
441 | unsafe { Arc { p: ptr::NonNull::new_unchecked(ptr), phantom: PhantomData } } |
442 | } |
443 | } |
444 | |
445 | impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> { |
    #[inline]
447 | fn eq(&self, other: &ThinArc<H, T>) -> bool { |
448 | **self == **other |
449 | } |
450 | } |
451 | |
452 | impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {} |
453 | |
454 | impl<H: Hash, T: Hash> Hash for ThinArc<H, T> { |
455 | fn hash<HSR: Hasher>(&self, state: &mut HSR) { |
456 | (**self).hash(state) |
457 | } |
458 | } |
459 | |