1 | //! Vendored and stripped down version of triomphe |
2 | use std::{ |
3 | alloc::{self, Layout}, |
4 | cmp::Ordering, |
5 | hash::{Hash, Hasher}, |
6 | marker::PhantomData, |
7 | mem::{self, ManuallyDrop}, |
8 | ops::Deref, |
9 | ptr, |
10 | sync::atomic::{ |
11 | self, |
12 | Ordering::{Acquire, Relaxed, Release}, |
13 | }, |
14 | }; |
15 | |
16 | use memoffset::offset_of; |
17 | |
18 | /// A soft limit on the amount of references that may be made to an `Arc`. |
19 | /// |
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
22 | const MAX_REFCOUNT: usize = (isize::MAX) as usize; |
23 | |
/// The object allocated by an `Arc<T>`
#[repr(C)]
26 | pub(crate) struct ArcInner<T: ?Sized> { |
27 | pub(crate) count: atomic::AtomicUsize, |
28 | pub(crate) data: T, |
29 | } |
30 | |
31 | unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {} |
32 | unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {} |
33 | |
34 | /// An atomically reference counted shared pointer |
35 | /// |
36 | /// See the documentation for [`Arc`] in the standard library. Unlike the |
37 | /// standard library `Arc`, this `Arc` does not support weak reference counting. |
38 | /// |
39 | /// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html |
#[repr(transparent)]
41 | pub(crate) struct Arc<T: ?Sized> { |
42 | pub(crate) p: ptr::NonNull<ArcInner<T>>, |
43 | pub(crate) phantom: PhantomData<T>, |
44 | } |
45 | |
46 | unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {} |
47 | unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {} |
48 | |
49 | impl<T> Arc<T> { |
    /// Reconstructs the `Arc<T>` from a raw pointer obtained from `into_raw()`
    /// (provided by upstream `triomphe`; not part of this stripped-down copy).
    ///
    /// Note: this raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// Upstream recommends using `OffsetArc` for this.
    #[inline]
57 | pub(crate) unsafe fn from_raw(ptr: *const T) -> Self { |
58 | // To find the corresponding pointer to the `ArcInner` we need |
59 | // to subtract the offset of the `data` field from the pointer. |
        let ptr: *const u8 = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
61 | Arc { p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>), phantom: PhantomData } |
62 | } |
63 | } |
64 | |
65 | impl<T: ?Sized> Arc<T> { |
    #[inline]
67 | fn inner(&self) -> &ArcInner<T> { |
68 | // This unsafety is ok because while this arc is alive we're guaranteed |
69 | // that the inner pointer is valid. Furthermore, we know that the |
70 | // `ArcInner` structure itself is `Sync` because the inner data is |
71 | // `Sync` as well, so we're ok loaning out an immutable pointer to these |
72 | // contents. |
73 | unsafe { &*self.ptr() } |
74 | } |
75 | |
76 | // Non-inlined part of `drop`. Just invokes the destructor. |
    #[inline(never)]
78 | unsafe fn drop_slow(&mut self) { |
79 | let _ = Box::from_raw(self.ptr()); |
80 | } |
81 | |
    /// Tests pointer equality between two `Arc`s, i.e. whether they refer to the
    /// _same_ allocation.
    #[inline]
85 | pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool { |
86 | this.ptr() == other.ptr() |
87 | } |
88 | |
89 | pub(crate) fn ptr(&self) -> *mut ArcInner<T> { |
90 | self.p.as_ptr() |
91 | } |
92 | } |
93 | |
94 | impl<T: ?Sized> Clone for Arc<T> { |
    #[inline]
96 | fn clone(&self) -> Self { |
97 | // Using a relaxed ordering is alright here, as knowledge of the |
98 | // original reference prevents other threads from erroneously deleting |
99 | // the object. |
100 | // |
101 | // As explained in the [Boost documentation][1], Increasing the |
102 | // reference counter can always be done with memory_order_relaxed: New |
103 | // references to an object can only be formed from an existing |
104 | // reference, and passing an existing reference from one thread to |
105 | // another must already provide any required synchronization. |
106 | // |
107 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
108 | let old_size = self.inner().count.fetch_add(1, Relaxed); |
109 | |
110 | // However we need to guard against massive refcounts in case someone |
111 | // is `mem::forget`ing Arcs. If we don't do this the count can overflow |
        // and users will use-after-free. We racily saturate to `isize::MAX` on
113 | // the assumption that there aren't ~2 billion threads incrementing |
114 | // the reference count at once. This branch will never be taken in |
115 | // any realistic program. |
116 | // |
117 | // We abort because such a program is incredibly degenerate, and we |
118 | // don't care to support it. |
119 | if old_size > MAX_REFCOUNT { |
120 | std::process::abort(); |
121 | } |
122 | |
123 | unsafe { Arc { p: ptr::NonNull::new_unchecked(self.ptr()), phantom: PhantomData } } |
124 | } |
125 | } |
126 | |
127 | impl<T: ?Sized> Deref for Arc<T> { |
128 | type Target = T; |
129 | |
    #[inline]
131 | fn deref(&self) -> &T { |
132 | &self.inner().data |
133 | } |
134 | } |
135 | |
136 | impl<T: ?Sized> Arc<T> { |
137 | /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned. |
    #[inline]
139 | pub(crate) fn get_mut(this: &mut Self) -> Option<&mut T> { |
140 | if this.is_unique() { |
141 | unsafe { |
                // See make_mut() in upstream triomphe for documentation of the
                // thread safety here.
143 | Some(&mut (*this.ptr()).data) |
144 | } |
145 | } else { |
146 | None |
147 | } |
148 | } |
149 | |
150 | /// Whether or not the `Arc` is uniquely owned (is the refcount 1?). |
151 | pub(crate) fn is_unique(&self) -> bool { |
152 | // See the extensive discussion in [1] for why this needs to be Acquire. |
153 | // |
154 | // [1] https://github.com/servo/servo/issues/21186 |
        self.inner().count.load(Acquire) == 1
156 | } |
157 | } |
158 | |
159 | impl<T: ?Sized> Drop for Arc<T> { |
    #[inline]
161 | fn drop(&mut self) { |
162 | // Because `fetch_sub` is already atomic, we do not need to synchronize |
163 | // with other threads unless we are going to delete the object. |
164 | if self.inner().count.fetch_sub(1, Release) != 1 { |
165 | return; |
166 | } |
167 | |
168 | // FIXME(bholley): Use the updated comment when [2] is merged. |
169 | // |
170 | // This load is needed to prevent reordering of use of the data and |
171 | // deletion of the data. Because it is marked `Release`, the decreasing |
172 | // of the reference count synchronizes with this `Acquire` load. This |
173 | // means that use of the data happens before decreasing the reference |
174 | // count, which happens before this load, which happens before the |
175 | // deletion of the data. |
176 | // |
177 | // As explained in the [Boost documentation][1], |
178 | // |
179 | // > It is important to enforce any possible access to the object in one |
180 | // > thread (through an existing reference) to *happen before* deleting |
181 | // > the object in a different thread. This is achieved by a "release" |
182 | // > operation after dropping a reference (any access to the object |
183 | // > through this reference must obviously happened before), and an |
184 | // > "acquire" operation before deleting the object. |
185 | // |
186 | // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) |
187 | // [2]: https://github.com/rust-lang/rust/pull/41714 |
188 | self.inner().count.load(Acquire); |
189 | |
190 | unsafe { |
191 | self.drop_slow(); |
192 | } |
193 | } |
194 | } |
195 | |
196 | impl<T: ?Sized + PartialEq> PartialEq for Arc<T> { |
197 | fn eq(&self, other: &Arc<T>) -> bool { |
198 | Self::ptr_eq(self, other) || *(*self) == *(*other) |
199 | } |
200 | |
201 | fn ne(&self, other: &Arc<T>) -> bool { |
202 | !Self::ptr_eq(self, other) && *(*self) != *(*other) |
203 | } |
204 | } |
205 | |
206 | impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> { |
207 | fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> { |
208 | (**self).partial_cmp(&**other) |
209 | } |
210 | |
211 | fn lt(&self, other: &Arc<T>) -> bool { |
212 | *(*self) < *(*other) |
213 | } |
214 | |
215 | fn le(&self, other: &Arc<T>) -> bool { |
216 | *(*self) <= *(*other) |
217 | } |
218 | |
219 | fn gt(&self, other: &Arc<T>) -> bool { |
220 | *(*self) > *(*other) |
221 | } |
222 | |
223 | fn ge(&self, other: &Arc<T>) -> bool { |
224 | *(*self) >= *(*other) |
225 | } |
226 | } |
227 | |
228 | impl<T: ?Sized + Ord> Ord for Arc<T> { |
229 | fn cmp(&self, other: &Arc<T>) -> Ordering { |
230 | (**self).cmp(&**other) |
231 | } |
232 | } |
233 | |
234 | impl<T: ?Sized + Eq> Eq for Arc<T> {} |
235 | |
236 | impl<T: ?Sized + Hash> Hash for Arc<T> { |
237 | fn hash<H: Hasher>(&self, state: &mut H) { |
238 | (**self).hash(state) |
239 | } |
240 | } |
241 | |
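/// The payload stored in the allocation: a user-provided header, followed by
/// the length of the slice, followed by the slice data itself.
///
/// `#[repr(C)]` keeps the leading field offsets identical whether the tail is
/// typed as `[T; 0]` (the sized form stored behind `ThinArc`) or `[T]` (the
/// unsized form handed out through `Deref`).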
#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
#[repr(C)]
244 | pub(crate) struct HeaderSlice<H, T: ?Sized> { |
245 | pub(crate) header: H, |
246 | length: usize, |
247 | slice: T, |
248 | } |
249 | |
250 | impl<H, T> HeaderSlice<H, [T]> { |
251 | pub(crate) fn slice(&self) -> &[T] { |
252 | &self.slice |
253 | } |
254 | } |
255 | |
256 | impl<H, T> Deref for HeaderSlice<H, [T; 0]> { |
257 | type Target = HeaderSlice<H, [T]>; |
258 | |
259 | fn deref(&self) -> &Self::Target { |
260 | unsafe { |
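            // Synthesize the fat pointer: the length metadata of a `*const [T]`
            // applies only to the dynamically sized tail, so a slice pointer at
            // `self` with `self.length` elements can be reinterpreted as the
            // unsized `HeaderSlice<H, [T]>`.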
261 | let len: usize = self.length; |
262 | let fake_slice: *const [T] = |
263 | ptr::slice_from_raw_parts(self as *const _ as *const T, len); |
264 | &*(fake_slice as *const HeaderSlice<H, [T]>) |
265 | } |
266 | } |
267 | } |
268 | |
269 | /// A "thin" `Arc` containing dynamically sized data |
270 | /// |
271 | /// This is functionally equivalent to `Arc<(H, [T])>` |
272 | /// |
273 | /// When you create an `Arc` containing a dynamically sized type |
274 | /// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack |
275 | /// as a "fat pointer", where the length of the slice is stored |
276 | /// alongside the `Arc`'s pointer. In some situations you may wish to |
277 | /// have a thin pointer instead, perhaps for FFI compatibility |
278 | /// or space efficiency. |
279 | /// |
/// `ThinArc` solves this by storing the length in the allocation itself,
/// via `HeaderSlice`.
///
/// Note that we use `[T; 0]` in order to have the right alignment for `T`.
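///
/// A sketch of the difference (illustrative only; this type is crate-private,
/// so the block is not compiled as a doctest):
///
/// ```ignore
/// use std::mem::size_of;
///
/// // The fat pointer carries the slice length next to the pointer itself,
/// // while `ThinArc` keeps it in the allocation and stays pointer-sized.
/// assert_eq!(size_of::<Arc<HeaderSlice<u32, [u16]>>>(), 2 * size_of::<usize>());
/// assert_eq!(size_of::<ThinArc<u32, u16>>(), size_of::<usize>());
/// ```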
#[repr(transparent)]
285 | pub(crate) struct ThinArc<H, T> { |
286 | ptr: ptr::NonNull<ArcInner<HeaderSlice<H, [T; 0]>>>, |
287 | phantom: PhantomData<(H, T)>, |
288 | } |
289 | |
290 | unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {} |
291 | unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {} |
292 | |
293 | // Synthesize a fat pointer from a thin pointer. |
294 | fn thin_to_thick<H, T>( |
295 | thin: *mut ArcInner<HeaderSlice<H, [T; 0]>>, |
296 | ) -> *mut ArcInner<HeaderSlice<H, [T]>> { |
    // Read the slice length that is stored in the allocation itself.
    let len: usize = unsafe { (*thin).data.length };
    // A `*mut [T]` over `len` elements carries exactly the length metadata the
    // fat pointer needs; the cast below transplants that metadata onto the
    // `ArcInner<HeaderSlice<H, [T]>>` pointer.
    let fake_slice: *mut [T] = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
    fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
301 | } |
302 | |
303 | impl<H, T> ThinArc<H, T> { |
    /// Temporarily converts `self` into a bona fide `Arc` and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
307 | pub(crate) fn with_arc<F, U>(&self, f: F) -> U |
308 | where |
309 | F: FnOnce(&Arc<HeaderSlice<H, [T]>>) -> U, |
310 | { |
311 | // Synthesize transient Arc, which never touches the refcount of the ArcInner. |
312 | let transient = unsafe { |
313 | ManuallyDrop::new(Arc { |
314 | p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())), |
315 | phantom: PhantomData, |
316 | }) |
317 | }; |
318 | |
319 | // Expose the transient Arc to the callback, which may clone it if it wants. |
320 | let result = f(&transient); |
321 | |
322 | // Forward the result. |
323 | result |
324 | } |
325 | |
326 | /// Creates a `ThinArc` for a HeaderSlice using the given header struct and |
327 | /// iterator to generate the slice. |
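    ///
    /// A usage sketch (illustrative only; not compiled as a doctest, since this
    /// type is crate-private):
    ///
    /// ```ignore
    /// let thin = ThinArc::from_header_and_iter(42u32, [1u16, 2, 3].into_iter());
    /// assert_eq!(thin.header, 42);
    /// assert_eq!(thin.slice(), &[1, 2, 3]);
    /// ```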
328 | pub(crate) fn from_header_and_iter<I>(header: H, mut items: I) -> Self |
329 | where |
330 | I: Iterator<Item = T> + ExactSizeIterator, |
331 | { |
        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
333 | |
334 | let num_items = items.len(); |
335 | |
336 | // Offset of the start of the slice in the allocation. |
337 | let inner_to_data_offset = offset_of!(ArcInner<HeaderSlice<H, [T; 0]>>, data); |
338 | let data_to_slice_offset = offset_of!(HeaderSlice<H, [T; 0]>, slice); |
339 | let slice_offset = inner_to_data_offset + data_to_slice_offset; |
340 | |
341 | // Compute the size of the real payload. |
        let slice_size = mem::size_of::<T>().checked_mul(num_items).expect("size overflows");
        let usable_size = slice_offset.checked_add(slice_size).expect("size overflows");
344 | |
345 | // Round up size to alignment. |
346 | let align = mem::align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>(); |
347 | let size = usable_size.wrapping_add(align - 1) & !(align - 1); |
        assert!(size >= usable_size, "size overflows");
        let layout = Layout::from_size_align(size, align).expect("invalid layout");
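        // A worked example (a sketch, assuming a 64-bit target, `H = u32`,
        // `T = u16` and 3 items): the count occupies bytes 0..8, the header
        // bytes 8..12, and `length` is aligned up to bytes 16..24, so
        // `slice_offset` is 24; `slice_size` is 3 * 2 = 6, `usable_size` is 30,
        // and rounding up to the 8-byte alignment yields a 32-byte allocation.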
350 | |
351 | let ptr: *mut ArcInner<HeaderSlice<H, [T; 0]>>; |
352 | unsafe { |
353 | let buffer = alloc::alloc(layout); |
354 | |
355 | if buffer.is_null() { |
356 | alloc::handle_alloc_error(layout); |
357 | } |
358 | |
359 | // // Synthesize the fat pointer. We do this by claiming we have a direct |
360 | // // pointer to a [T], and then changing the type of the borrow. The key |
361 | // // point here is that the length portion of the fat pointer applies |
362 | // // only to the number of elements in the dynamically-sized portion of |
363 | // // the type, so the value will be the same whether it points to a [T] |
364 | // // or something else with a [T] as its last member. |
365 | // let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items); |
366 | // ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>; |
367 | ptr = buffer as *mut _; |
368 | |
369 | let count = atomic::AtomicUsize::new(1); |
370 | |
371 | // Write the data. |
372 | // |
373 | // Note that any panics here (i.e. from the iterator) are safe, since |
374 | // we'll just leak the uninitialized memory. |
375 | ptr::write(ptr::addr_of_mut!((*ptr).count), count); |
376 | ptr::write(ptr::addr_of_mut!((*ptr).data.header), header); |
377 | ptr::write(ptr::addr_of_mut!((*ptr).data.length), num_items); |
378 | if num_items != 0 { |
379 | let mut current = ptr::addr_of_mut!((*ptr).data.slice) as *mut T; |
380 | debug_assert_eq!(current as usize - buffer as usize, slice_offset); |
381 | for _ in 0..num_items { |
382 | ptr::write( |
383 | current, |
                        items.next().expect("ExactSizeIterator over-reported length"),
385 | ); |
386 | current = current.offset(1); |
387 | } |
                assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
389 | |
390 | // We should have consumed the buffer exactly. |
391 | debug_assert_eq!(current as *mut u8, buffer.add(usable_size)); |
392 | } |
            assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
394 | } |
395 | |
396 | ThinArc { ptr: unsafe { ptr::NonNull::new_unchecked(ptr) }, phantom: PhantomData } |
397 | } |
398 | } |
399 | |
400 | impl<H, T> Deref for ThinArc<H, T> { |
401 | type Target = HeaderSlice<H, [T]>; |
402 | |
    #[inline]
404 | fn deref(&self) -> &Self::Target { |
405 | unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data } |
406 | } |
407 | } |
408 | |
409 | impl<H, T> Clone for ThinArc<H, T> { |
    #[inline]
    fn clone(&self) -> Self {
        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
413 | } |
414 | } |
415 | |
416 | impl<H, T> Drop for ThinArc<H, T> { |
    #[inline]
418 | fn drop(&mut self) { |
419 | let _ = Arc::from_thin(ThinArc { ptr: self.ptr, phantom: PhantomData }); |
420 | } |
421 | } |
422 | |
423 | impl<H, T> Arc<HeaderSlice<H, [T]>> { |
424 | /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount |
425 | /// is not modified. |
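    ///
    /// A round-trip sketch together with `from_thin` below (illustrative only;
    /// not compiled as a doctest, since this type is crate-private):
    ///
    /// ```ignore
    /// let thin = ThinArc::from_header_and_iter((), [1u8, 2].into_iter());
    /// let fat: Arc<HeaderSlice<(), [u8]>> = Arc::from_thin(thin);
    /// let thin_again = Arc::into_thin(fat);
    /// assert_eq!(thin_again.slice(), &[1, 2]);
    /// ```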
    #[inline]
427 | pub(crate) fn into_thin(a: Self) -> ThinArc<H, T> { |
        assert_eq!(a.length, a.slice.len(), "Length needs to be correct for ThinArc to work");
429 | let fat_ptr: *mut ArcInner<HeaderSlice<H, [T]>> = a.ptr(); |
430 | mem::forget(a); |
431 | let thin_ptr = fat_ptr as *mut [usize] as *mut usize; |
432 | ThinArc { |
433 | ptr: unsafe { |
434 | ptr::NonNull::new_unchecked(thin_ptr as *mut ArcInner<HeaderSlice<H, [T; 0]>>) |
435 | }, |
436 | phantom: PhantomData, |
437 | } |
438 | } |
439 | |
440 | /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount |
441 | /// is not modified. |
    #[inline]
443 | pub(crate) fn from_thin(a: ThinArc<H, T>) -> Self { |
444 | let ptr = thin_to_thick(a.ptr.as_ptr()); |
445 | mem::forget(a); |
446 | unsafe { Arc { p: ptr::NonNull::new_unchecked(ptr), phantom: PhantomData } } |
447 | } |
448 | } |
449 | |
450 | impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> { |
    #[inline]
452 | fn eq(&self, other: &ThinArc<H, T>) -> bool { |
453 | **self == **other |
454 | } |
455 | } |
456 | |
457 | impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {} |
458 | |
459 | impl<H: Hash, T: Hash> Hash for ThinArc<H, T> { |
460 | fn hash<HSR: Hasher>(&self, state: &mut HSR) { |
461 | (**self).hash(state) |
462 | } |
463 | } |
464 | |