1 | // Copyright 2023 The Fuchsia Authors |
2 | // |
3 | // Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0 |
4 | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
5 | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
6 | // This file may not be copied, modified, or distributed except according to |
7 | // those terms. |
8 | |
9 | use core::{fmt, hash::Hash}; |
10 | |
11 | use super::*; |
12 | |
13 | /// A type with no alignment requirement. |
14 | /// |
15 | /// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>` |
16 | /// has the same size and bit validity as `T`, but not necessarily the same |
17 | /// alignment [or ABI]. This is useful if a type with an alignment requirement |
18 | /// needs to be read from a chunk of memory which provides no alignment |
19 | /// guarantees. |
20 | /// |
21 | /// Since `Unalign` has no alignment requirement, the inner `T` may not be |
22 | /// properly aligned in memory. There are five ways to access the inner `T`: |
23 | /// - by value, using [`get`] or [`into_inner`] |
24 | /// - by reference inside of a callback, using [`update`] |
25 | /// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can |
26 | /// fail if the `Unalign` does not satisfy `T`'s alignment requirement at |
27 | /// runtime |
28 | /// - unsafely by reference, using [`deref_unchecked`] or |
29 | /// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that |
30 | /// the `Unalign` satisfies `T`'s alignment requirement |
31 | /// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or |
32 | /// [`DerefMut::deref_mut`] |
33 | /// |
34 | /// [or ABI]: https://github.com/google/zerocopy/issues/164 |
35 | /// [`get`]: Unalign::get |
36 | /// [`into_inner`]: Unalign::into_inner |
37 | /// [`update`]: Unalign::update |
38 | /// [`try_deref`]: Unalign::try_deref |
39 | /// [`try_deref_mut`]: Unalign::try_deref_mut |
40 | /// [`deref_unchecked`]: Unalign::deref_unchecked |
41 | /// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked |
42 | /// |
43 | /// # Example |
44 | /// |
45 | /// In this example, we need `EthernetFrame` to have no alignment requirement - |
46 | /// and thus implement [`Unaligned`]. `EtherType` is `#[repr(u16)]` and so |
47 | /// cannot implement `Unaligned`. We use `Unalign` to relax `EtherType`'s |
48 | /// alignment requirement so that `EthernetFrame` has no alignment requirement |
49 | /// and can implement `Unaligned`. |
50 | /// |
51 | /// ```rust |
52 | /// use zerocopy::*; |
53 | /// # use zerocopy_derive::*; |
/// # #[derive(FromBytes, KnownLayout, Immutable, Unaligned)] #[repr(C)] struct Mac([u8; 6]);
55 | /// |
56 | /// # #[derive(PartialEq, Copy, Clone, Debug)] |
57 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
58 | /// #[repr(u16)] |
59 | /// enum EtherType { |
60 | /// Ipv4 = 0x0800u16.to_be(), |
61 | /// Arp = 0x0806u16.to_be(), |
62 | /// Ipv6 = 0x86DDu16.to_be(), |
63 | /// # /* |
64 | /// ... |
65 | /// # */ |
66 | /// } |
67 | /// |
68 | /// #[derive(TryFromBytes, KnownLayout, Immutable, Unaligned)] |
69 | /// #[repr(C)] |
70 | /// struct EthernetFrame { |
71 | /// src: Mac, |
72 | /// dst: Mac, |
73 | /// ethertype: Unalign<EtherType>, |
74 | /// payload: [u8], |
75 | /// } |
76 | /// |
77 | /// let bytes = &[ |
78 | /// # 0, 1, 2, 3, 4, 5, |
79 | /// # 6, 7, 8, 9, 10, 11, |
80 | /// # /* |
81 | /// ... |
82 | /// # */ |
83 | /// 0x86, 0xDD, // EtherType |
84 | /// 0xDE, 0xAD, 0xBE, 0xEF // Payload |
85 | /// ][..]; |
86 | /// |
87 | /// // PANICS: Guaranteed not to panic because `bytes` is of the right |
88 | /// // length, has the right contents, and `EthernetFrame` has no |
89 | /// // alignment requirement. |
90 | /// let packet = EthernetFrame::try_ref_from_bytes(&bytes).unwrap(); |
91 | /// |
92 | /// assert_eq!(packet.ethertype.get(), EtherType::Ipv6); |
93 | /// assert_eq!(packet.payload, [0xDE, 0xAD, 0xBE, 0xEF]); |
94 | /// ``` |
95 | /// |
96 | /// # Safety |
97 | /// |
98 | /// `Unalign<T>` is guaranteed to have the same size and bit validity as `T`, |
99 | /// and to have [`UnsafeCell`]s covering the same byte ranges as `T`. |
100 | /// `Unalign<T>` is guaranteed to have alignment 1. |
101 | // NOTE: This type is sound to use with types that need to be dropped. The |
102 | // reason is that the compiler-generated drop code automatically moves all |
103 | // values to aligned memory slots before dropping them in-place. This is not |
104 | // well-documented, but it's hinted at in places like [1] and [2]. However, this |
105 | // also means that `T` must be `Sized`; unless something changes, we can never |
106 | // support unsized `T`. [3] |
107 | // |
108 | // [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646 |
109 | // [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323 |
110 | // [3] https://github.com/google/zerocopy/issues/209 |
#[allow(missing_debug_implementations)]
#[derive(Default, Copy)]
#[cfg_attr(any(feature = "derive", test), derive(Immutable, FromBytes, IntoBytes, Unaligned))]
#[repr(C, packed)]
115 | pub struct Unalign<T>(T); |
116 | |
117 | // We do not use `derive(KnownLayout)` on `Unalign`, because the derive is not |
118 | // smart enough to realize that `Unalign<T>` is always sized and thus emits a |
// `KnownLayout` impl bounded on `T: KnownLayout`. This is overly restrictive.
120 | impl_known_layout!(T => Unalign<T>); |
121 | |
122 | safety_comment! { |
123 | /// SAFETY: |
124 | /// - `Unalign<T>` promises to have alignment 1, and so we don't require |
125 | /// that `T: Unaligned`. |
126 | /// - `Unalign<T>` has the same bit validity as `T`, and so it is |
127 | /// `FromZeros`, `FromBytes`, or `IntoBytes` exactly when `T` is as well. |
128 | /// - `Immutable`: `Unalign<T>` has the same fields as `T`, so it contains |
129 | /// `UnsafeCell`s exactly when `T` does. |
/// - `TryFromBytes`: `Unalign<T>` has the same bit validity as
131 | /// `T`, so `T::is_bit_valid` is a sound implementation of `is_bit_valid`. |
132 | /// Furthermore: |
133 | /// - Since `T` and `Unalign<T>` have the same layout, they have the same |
134 | /// size (as required by `unsafe_impl!`). |
135 | /// - Since `T` and `Unalign<T>` have the same fields, they have |
136 | /// `UnsafeCell`s at the same byte ranges (as required by |
137 | /// `unsafe_impl!`). |
138 | impl_or_verify!(T => Unaligned for Unalign<T>); |
139 | impl_or_verify!(T: Immutable => Immutable for Unalign<T>); |
140 | impl_or_verify!( |
141 | T: TryFromBytes => TryFromBytes for Unalign<T>; |
142 | |c: Maybe<T>| T::is_bit_valid(c) |
143 | ); |
144 | impl_or_verify!(T: FromZeros => FromZeros for Unalign<T>); |
145 | impl_or_verify!(T: FromBytes => FromBytes for Unalign<T>); |
146 | impl_or_verify!(T: IntoBytes => IntoBytes for Unalign<T>); |
147 | } |
148 | |
149 | // Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be |
150 | // aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound |
151 | // is not sufficient to implement `Clone` for `Unalign`. |
152 | impl<T: Copy> Clone for Unalign<T> { |
#[inline(always)]
154 | fn clone(&self) -> Unalign<T> { |
155 | *self |
156 | } |
157 | } |
158 | |
159 | impl<T> Unalign<T> { |
160 | /// Constructs a new `Unalign`. |
#[inline(always)]
162 | pub const fn new(val: T) -> Unalign<T> { |
163 | Unalign(val) |
164 | } |
165 | |
166 | /// Consumes `self`, returning the inner `T`. |
#[inline(always)]
168 | pub const fn into_inner(self) -> T { |
169 | // SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same size |
170 | // and bit validity as `T`. |
171 | // |
172 | // We do this instead of just destructuring in order to prevent |
173 | // `Unalign`'s `Drop::drop` from being run, since dropping is not |
174 | // supported in `const fn`s. |
175 | // |
176 | // TODO(https://github.com/rust-lang/rust/issues/73255): Destructure |
177 | // instead of using unsafe. |
178 | unsafe { crate::util::transmute_unchecked(self) } |
179 | } |
180 | |
181 | /// Attempts to return a reference to the wrapped `T`, failing if `self` is |
182 | /// not properly aligned. |
183 | /// |
184 | /// If `self` does not satisfy `align_of::<T>()`, then `try_deref` returns |
185 | /// `Err`. |
186 | /// |
187 | /// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers |
188 | /// may prefer [`Deref::deref`], which is infallible. |
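///
/// # Example
///
/// A minimal sketch: because `Unalign` has alignment 1, whether the call
/// succeeds depends on where the value happens to land in memory, so both
/// outcomes are handled.
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(1u32);
/// match u.try_deref() {
///     // `u` happened to satisfy `u32`'s alignment requirement.
///     Ok(val) => assert_eq!(*val, 1),
///     // `u` was misaligned; the value is still accessible by copy.
///     Err(_) => assert_eq!(u.get(), 1),
/// }
/// ```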
#[inline(always)]
190 | pub fn try_deref(&self) -> Result<&T, AlignmentError<&Self, T>> { |
191 | let inner = Ptr::from_ref(self).transparent_wrapper_into_inner(); |
192 | match inner.bikeshed_try_into_aligned() { |
193 | Ok(aligned) => Ok(aligned.as_ref()), |
194 | Err(err) => Err(err.map_src(|src| src.into_unalign().as_ref())), |
195 | } |
196 | } |
197 | |
198 | /// Attempts to return a mutable reference to the wrapped `T`, failing if |
199 | /// `self` is not properly aligned. |
200 | /// |
/// If `self` does not satisfy `align_of::<T>()`, then `try_deref_mut` returns
202 | /// `Err`. |
203 | /// |
204 | /// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and |
205 | /// callers may prefer [`DerefMut::deref_mut`], which is infallible. |
#[inline(always)]
207 | pub fn try_deref_mut(&mut self) -> Result<&mut T, AlignmentError<&mut Self, T>> { |
208 | let inner = Ptr::from_mut(self).transparent_wrapper_into_inner(); |
209 | match inner.bikeshed_try_into_aligned() { |
210 | Ok(aligned) => Ok(aligned.as_mut()), |
211 | Err(err) => Err(err.map_src(|src| src.into_unalign().as_mut())), |
212 | } |
213 | } |
214 | |
215 | /// Returns a reference to the wrapped `T` without checking alignment. |
216 | /// |
/// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
218 | /// may prefer [`Deref::deref`], which is safe. |
219 | /// |
220 | /// # Safety |
221 | /// |
222 | /// The caller must guarantee that `self` satisfies `align_of::<T>()`. |
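///
/// # Example
///
/// A minimal sketch which verifies alignment out of band before making the
/// unsafe call:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(1u32);
/// if (u.get_ptr() as usize) % core::mem::align_of::<u32>() == 0 {
///     // SAFETY: We just confirmed that `u` satisfies `u32`'s alignment.
///     let val = unsafe { u.deref_unchecked() };
///     assert_eq!(*val, 1);
/// }
/// ```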
#[inline(always)]
224 | pub const unsafe fn deref_unchecked(&self) -> &T { |
// SAFETY: `Unalign<T>` is `#[repr(C, packed)]` with a single `T` field, so
// there is a valid `T` at the same memory location as `self`. It has no
// alignment guarantee,
227 | // but the caller has promised that `self` is properly aligned, so we |
228 | // know that it is sound to create a reference to `T` at this memory |
229 | // location. |
230 | // |
231 | // We use `mem::transmute` instead of `&*self.get_ptr()` because |
232 | // dereferencing pointers is not stable in `const` on our current MSRV |
233 | // (1.56 as of this writing). |
234 | unsafe { mem::transmute(self) } |
235 | } |
236 | |
237 | /// Returns a mutable reference to the wrapped `T` without checking |
238 | /// alignment. |
239 | /// |
/// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
241 | /// callers may prefer [`DerefMut::deref_mut`], which is safe. |
242 | /// |
243 | /// # Safety |
244 | /// |
245 | /// The caller must guarantee that `self` satisfies `align_of::<T>()`. |
#[inline(always)]
247 | pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T { |
248 | // SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at |
249 | // the same memory location as `self`. It has no alignment guarantee, |
250 | // but the caller has promised that `self` is properly aligned, so we |
251 | // know that the pointer itself is aligned, and thus that it is sound to |
252 | // create a reference to a `T` at this memory location. |
253 | unsafe { &mut *self.get_mut_ptr() } |
254 | } |
255 | |
256 | /// Gets an unaligned raw pointer to the inner `T`. |
257 | /// |
258 | /// # Safety |
259 | /// |
260 | /// The returned raw pointer is not necessarily aligned to |
261 | /// `align_of::<T>()`. Most functions which operate on raw pointers require |
262 | /// those pointers to be aligned, so calling those functions with the result |
263 | /// of `get_ptr` will result in undefined behavior if alignment is not |
264 | /// guaranteed using some out-of-band mechanism. In general, the only |
265 | /// functions which are safe to call with this pointer are those which are |
266 | /// explicitly documented as being sound to use with an unaligned pointer, |
267 | /// such as [`read_unaligned`]. |
268 | /// |
269 | /// Even if the caller is permitted to mutate `self` (e.g. they have |
270 | /// ownership or a mutable borrow), it is not guaranteed to be sound to |
271 | /// write through the returned pointer. If writing is required, prefer |
272 | /// [`get_mut_ptr`] instead. |
273 | /// |
274 | /// [`read_unaligned`]: core::ptr::read_unaligned |
275 | /// [`get_mut_ptr`]: Unalign::get_mut_ptr |
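///
/// # Example
///
/// A minimal sketch using [`read_unaligned`], which is sound on pointers of
/// any alignment:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(0xAABBCCDDu32);
/// // SAFETY: `get_ptr` returns a pointer to a valid, initialized `u32`, and
/// // `read_unaligned` places no alignment requirement on its argument.
/// let val = unsafe { core::ptr::read_unaligned(u.get_ptr()) };
/// assert_eq!(val, 0xAABBCCDD);
/// ```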
#[inline(always)]
277 | pub const fn get_ptr(&self) -> *const T { |
278 | ptr::addr_of!(self.0) |
279 | } |
280 | |
281 | /// Gets an unaligned mutable raw pointer to the inner `T`. |
282 | /// |
283 | /// # Safety |
284 | /// |
285 | /// The returned raw pointer is not necessarily aligned to |
286 | /// `align_of::<T>()`. Most functions which operate on raw pointers require |
287 | /// those pointers to be aligned, so calling those functions with the result |
/// of `get_mut_ptr` will result in undefined behavior if alignment is not
289 | /// guaranteed using some out-of-band mechanism. In general, the only |
290 | /// functions which are safe to call with this pointer are those which are |
291 | /// explicitly documented as being sound to use with an unaligned pointer, |
292 | /// such as [`read_unaligned`]. |
293 | /// |
294 | /// [`read_unaligned`]: core::ptr::read_unaligned |
295 | // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. |
#[inline(always)]
297 | pub fn get_mut_ptr(&mut self) -> *mut T { |
298 | ptr::addr_of_mut!(self.0) |
299 | } |
300 | |
301 | /// Sets the inner `T`, dropping the previous value. |
302 | // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. |
#[inline(always)]
304 | pub fn set(&mut self, t: T) { |
305 | *self = Unalign::new(t); |
306 | } |
307 | |
308 | /// Updates the inner `T` by calling a function on it. |
309 | /// |
310 | /// If [`T: Unaligned`], then `Unalign<T>` implements [`DerefMut`], and that |
311 | /// impl should be preferred over this method when performing updates, as it |
312 | /// will usually be faster and more ergonomic. |
313 | /// |
314 | /// For large types, this method may be expensive, as it requires copying |
315 | /// `2 * size_of::<T>()` bytes. \[1\] |
316 | /// |
317 | /// \[1\] Since the inner `T` may not be aligned, it would not be sound to |
318 | /// invoke `f` on it directly. Instead, `update` moves it into a |
319 | /// properly-aligned location in the local stack frame, calls `f` on it, and |
320 | /// then moves it back to its original location in `self`. |
321 | /// |
322 | /// [`T: Unaligned`]: Unaligned |
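///
/// # Example
///
/// A small illustrative sketch (the `u32` value is arbitrary):
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let mut u = Unalign::new(10u32);
/// // The closure is handed an aligned `&mut u32` (copied if necessary), and
/// // its return value is passed through.
/// let doubled = u.update(|val| {
///     *val *= 2;
///     *val
/// });
/// assert_eq!(u.get(), 20);
/// assert_eq!(doubled, 20);
/// ```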
#[inline]
324 | pub fn update<O, F: FnOnce(&mut T) -> O>(&mut self, f: F) -> O { |
325 | if mem::align_of::<T>() == 1 { |
326 | // While we advise callers to use `DerefMut` when `T: Unaligned`, |
327 | // not all callers will be able to guarantee `T: Unaligned` in all |
328 | // cases. In particular, callers who are themselves providing an API |
329 | // which is generic over `T` may sometimes be called by *their* |
330 | // callers with `T` such that `align_of::<T>() == 1`, but cannot |
331 | // guarantee this in the general case. Thus, this optimization may |
332 | // sometimes be helpful. |
333 | |
334 | // SAFETY: Since `T`'s alignment is 1, `self` satisfies its |
335 | // alignment by definition. |
336 | let t = unsafe { self.deref_mut_unchecked() }; |
337 | return f(t); |
338 | } |
339 | |
340 | // On drop, this moves `copy` out of itself and uses `ptr::write` to |
341 | // overwrite `slf`. |
342 | struct WriteBackOnDrop<T> { |
343 | copy: ManuallyDrop<T>, |
344 | slf: *mut Unalign<T>, |
345 | } |
346 | |
347 | impl<T> Drop for WriteBackOnDrop<T> { |
348 | fn drop(&mut self) { |
349 | // SAFETY: We never use `copy` again as required by |
350 | // `ManuallyDrop::take`. |
351 | let copy = unsafe { ManuallyDrop::take(&mut self.copy) }; |
352 | // SAFETY: `slf` is the raw pointer value of `self`. We know it |
353 | // is valid for writes and properly aligned because `self` is a |
354 | // mutable reference, which guarantees both of these properties. |
355 | unsafe { ptr::write(self.slf, Unalign::new(copy)) }; |
356 | } |
357 | } |
358 | |
359 | // SAFETY: We know that `self` is valid for reads, properly aligned, and |
360 | // points to an initialized `Unalign<T>` because it is a mutable |
361 | // reference, which guarantees all of these properties. |
362 | // |
// Since `T` is not guaranteed to be `Copy`, it would be unsound in the
// general case to allow both the original `Unalign<T>` and the copy to
// be used by safe code.
365 | // We guarantee that the copy is used to overwrite the original in the |
366 | // `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is |
367 | // called before any other safe code executes, soundness is upheld. |
368 | // While this method can terminate in two ways (by returning normally or |
369 | // by unwinding due to a panic in `f`), in both cases, `write_back` is |
370 | // dropped - and its `drop` called - before any other safe code can |
371 | // execute. |
372 | let copy = unsafe { ptr::read(self) }.into_inner(); |
373 | let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self }; |
374 | |
375 | let ret = f(&mut write_back.copy); |
376 | |
377 | drop(write_back); |
378 | ret |
379 | } |
380 | } |
381 | |
382 | impl<T: Copy> Unalign<T> { |
383 | /// Gets a copy of the inner `T`. |
384 | // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. |
#[inline(always)]
386 | pub fn get(&self) -> T { |
let Unalign(val) = *self;
388 | val |
389 | } |
390 | } |
391 | |
392 | impl<T: Unaligned> Deref for Unalign<T> { |
393 | type Target = T; |
394 | |
#[inline(always)]
396 | fn deref(&self) -> &T { |
397 | Ptr::from_ref(self).transparent_wrapper_into_inner().bikeshed_recall_aligned().as_ref() |
398 | } |
399 | } |
400 | |
401 | impl<T: Unaligned> DerefMut for Unalign<T> { |
#[inline(always)]
403 | fn deref_mut(&mut self) -> &mut T { |
404 | Ptr::from_mut(self).transparent_wrapper_into_inner().bikeshed_recall_aligned().as_mut() |
405 | } |
406 | } |
407 | |
408 | impl<T: Unaligned + PartialOrd> PartialOrd<Unalign<T>> for Unalign<T> { |
#[inline(always)]
410 | fn partial_cmp(&self, other: &Unalign<T>) -> Option<Ordering> { |
411 | PartialOrd::partial_cmp(self.deref(), other.deref()) |
412 | } |
413 | } |
414 | |
415 | impl<T: Unaligned + Ord> Ord for Unalign<T> { |
#[inline(always)]
417 | fn cmp(&self, other: &Unalign<T>) -> Ordering { |
418 | Ord::cmp(self.deref(), other.deref()) |
419 | } |
420 | } |
421 | |
422 | impl<T: Unaligned + PartialEq> PartialEq<Unalign<T>> for Unalign<T> { |
#[inline(always)]
424 | fn eq(&self, other: &Unalign<T>) -> bool { |
425 | PartialEq::eq(self.deref(), other.deref()) |
426 | } |
427 | } |
428 | |
429 | impl<T: Unaligned + Eq> Eq for Unalign<T> {} |
430 | |
431 | impl<T: Unaligned + Hash> Hash for Unalign<T> { |
#[inline(always)]
433 | fn hash<H>(&self, state: &mut H) |
434 | where |
435 | H: Hasher, |
436 | { |
437 | self.deref().hash(state); |
438 | } |
439 | } |
440 | |
441 | impl<T: Unaligned + Debug> Debug for Unalign<T> { |
#[inline(always)]
443 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { |
444 | Debug::fmt(self.deref(), f) |
445 | } |
446 | } |
447 | |
448 | impl<T: Unaligned + Display> Display for Unalign<T> { |
#[inline(always)]
450 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { |
451 | Display::fmt(self.deref(), f) |
452 | } |
453 | } |
454 | |
455 | /// A wrapper type to construct uninitialized instances of `T`. |
456 | /// |
457 | /// `MaybeUninit` is identical to the [standard library |
458 | /// `MaybeUninit`][core-maybe-uninit] type except that it supports unsized |
459 | /// types. |
460 | /// |
461 | /// # Layout |
462 | /// |
463 | /// The same layout guarantees and caveats apply to `MaybeUninit<T>` as apply to |
464 | /// the [standard library `MaybeUninit`][core-maybe-uninit] with one exception: |
465 | /// for `T: !Sized`, there is no single value for `T`'s size. Instead, for such |
466 | /// types, the following are guaranteed: |
467 | /// - Every [valid size][valid-size] for `T` is a valid size for |
468 | /// `MaybeUninit<T>` and vice versa |
469 | /// - Given `t: *const T` and `m: *const MaybeUninit<T>` with identical fat |
470 | /// pointer metadata, `t` and `m` address the same number of bytes (and |
471 | /// likewise for `*mut`) |
472 | /// |
473 | /// [core-maybe-uninit]: core::mem::MaybeUninit |
474 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
#[repr(transparent)]
#[doc(hidden)]
477 | pub struct MaybeUninit<T: ?Sized + KnownLayout>( |
478 | // SAFETY: `MaybeUninit<T>` has the same size as `T`, because (by invariant |
479 | // on `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`, |
480 | // and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT` |
481 | // accurately reflects the layout of `T`. By invariant on `T::MaybeUninit`, |
// it admits uninitialized bytes in all positions. Because `MaybeUninit` is
483 | // marked `repr(transparent)`, these properties additionally hold true for |
484 | // `Self`. |
485 | T::MaybeUninit, |
486 | ); |
487 | |
#[doc(hidden)]
489 | impl<T: ?Sized + KnownLayout> MaybeUninit<T> { |
490 | /// Constructs a `MaybeUninit<T>` initialized with the given value. |
#[inline(always)]
492 | pub fn new(val: T) -> Self |
493 | where |
494 | T: Sized, |
495 | Self: Sized, |
496 | { |
497 | // SAFETY: It is valid to transmute `val` to `MaybeUninit<T>` because it |
498 | // is both valid to transmute `val` to `T::MaybeUninit`, and it is valid |
499 | // to transmute from `T::MaybeUninit` to `MaybeUninit<T>`. |
500 | // |
501 | // First, it is valid to transmute `val` to `T::MaybeUninit` because, by |
502 | // invariant on `T::MaybeUninit`: |
503 | // - For `T: Sized`, `T` and `T::MaybeUninit` have the same size. |
504 | // - All byte sequences of the correct size are valid values of |
505 | // `T::MaybeUninit`. |
506 | // |
507 | // Second, it is additionally valid to transmute from `T::MaybeUninit` |
508 | // to `MaybeUninit<T>`, because `MaybeUninit<T>` is a |
509 | // `repr(transparent)` wrapper around `T::MaybeUninit`. |
510 | // |
511 | // These two transmutes are collapsed into one so we don't need to add a |
512 | // `T::MaybeUninit: Sized` bound to this function's `where` clause. |
513 | unsafe { crate::util::transmute_unchecked(val) } |
514 | } |
515 | |
516 | /// Constructs an uninitialized `MaybeUninit<T>`. |
#[must_use]
#[inline(always)]
519 | pub fn uninit() -> Self |
520 | where |
521 | T: Sized, |
522 | Self: Sized, |
523 | { |
524 | let uninit = CoreMaybeUninit::<T>::uninit(); |
525 | // SAFETY: It is valid to transmute from `CoreMaybeUninit<T>` to |
526 | // `MaybeUninit<T>` since they both admit uninitialized bytes in all |
527 | // positions, and they have the same size (i.e., that of `T`). |
528 | // |
529 | // `MaybeUninit<T>` has the same size as `T`, because (by invariant on |
530 | // `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`, |
531 | // and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT` |
532 | // accurately reflects the layout of `T`. |
533 | // |
534 | // `CoreMaybeUninit<T>` has the same size as `T` [1] and admits |
535 | // uninitialized bytes in all positions. |
536 | // |
537 | // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: |
538 | // |
539 | // `MaybeUninit<T>` is guaranteed to have the same size, alignment, |
540 | // and ABI as `T` |
541 | unsafe { crate::util::transmute_unchecked(uninit) } |
542 | } |
543 | |
544 | /// Creates a `Box<MaybeUninit<T>>`. |
545 | /// |
546 | /// This function is useful for allocating large, uninit values on the heap |
547 | /// without ever creating a temporary instance of `Self` on the stack. |
548 | /// |
549 | /// # Errors |
550 | /// |
551 | /// Returns an error on allocation failure. Allocation failure is guaranteed |
552 | /// never to cause a panic or an abort. |
#[cfg(feature = "alloc")]
#[inline]
pub fn new_boxed_uninit(meta: T::PointerMetadata) -> Result<Box<Self>, AllocError> {
// SAFETY: `alloc::alloc::alloc` is a valid argument of
557 | // `new_box`. The referent of the pointer returned by `alloc` (and, |
558 | // consequently, the `Box` derived from it) is a valid instance of |
559 | // `Self`, because `Self` is `MaybeUninit` and thus admits arbitrary |
560 | // (un)initialized bytes. |
561 | unsafe { crate::util::new_box(meta, alloc::alloc::alloc) } |
562 | } |
563 | |
564 | /// Extracts the value from the `MaybeUninit<T>` container. |
565 | /// |
566 | /// # Safety |
567 | /// |
/// The caller must ensure that `self` is in a bit-valid state. Depending
/// on subsequent use, it may also need to be in a library-valid state.
#[inline(always)]
571 | pub unsafe fn assume_init(self) -> T |
572 | where |
573 | T: Sized, |
574 | Self: Sized, |
575 | { |
// SAFETY: The caller guarantees that `self` is in a bit-valid state.
577 | unsafe { crate::util::transmute_unchecked(self) } |
578 | } |
579 | } |
580 | |
581 | impl<T: ?Sized + KnownLayout> fmt::Debug for MaybeUninit<T> { |
#[inline]
583 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
584 | f.pad(core::any::type_name::<Self>()) |
585 | } |
586 | } |
587 | |
#[cfg(test)]
589 | mod tests { |
590 | use core::panic::AssertUnwindSafe; |
591 | |
592 | use super::*; |
593 | use crate::util::testutil::*; |
594 | |
595 | #[test] |
596 | fn test_unalign() { |
597 | // Test methods that don't depend on alignment. |
598 | let mut u = Unalign::new(AU64(123)); |
599 | assert_eq!(u.get(), AU64(123)); |
600 | assert_eq!(u.into_inner(), AU64(123)); |
601 | assert_eq!(u.get_ptr(), <*const _>::cast::<AU64>(&u)); |
602 | assert_eq!(u.get_mut_ptr(), <*mut _>::cast::<AU64>(&mut u)); |
603 | u.set(AU64(321)); |
604 | assert_eq!(u.get(), AU64(321)); |
605 | |
606 | // Test methods that depend on alignment (when alignment is satisfied). |
607 | let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); |
608 | assert_eq!(u.t.try_deref().unwrap(), &AU64(123)); |
609 | assert_eq!(u.t.try_deref_mut().unwrap(), &mut AU64(123)); |
610 | // SAFETY: The `Align<_, AU64>` guarantees proper alignment. |
611 | assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123)); |
612 | // SAFETY: The `Align<_, AU64>` guarantees proper alignment. |
613 | assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123)); |
614 | *u.t.try_deref_mut().unwrap() = AU64(321); |
615 | assert_eq!(u.t.get(), AU64(321)); |
616 | |
617 | // Test methods that depend on alignment (when alignment is not |
618 | // satisfied). |
619 | let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123))); |
620 | assert!(matches!(u.t.try_deref(), Err(AlignmentError { .. }))); |
621 | assert!(matches!(u.t.try_deref_mut(), Err(AlignmentError { .. }))); |
622 | |
623 | // Test methods that depend on `T: Unaligned`. |
624 | let mut u = Unalign::new(123u8); |
625 | assert_eq!(u.try_deref(), Ok(&123)); |
626 | assert_eq!(u.try_deref_mut(), Ok(&mut 123)); |
627 | assert_eq!(u.deref(), &123); |
628 | assert_eq!(u.deref_mut(), &mut 123); |
629 | *u = 21; |
630 | assert_eq!(u.get(), 21); |
631 | |
632 | // Test that some `Unalign` functions and methods are `const`. |
633 | const _UNALIGN: Unalign<u64> = Unalign::new(0); |
634 | const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr(); |
635 | const _U64: u64 = _UNALIGN.into_inner(); |
636 | // Make sure all code is considered "used". |
637 | // |
638 | // TODO(https://github.com/rust-lang/rust/issues/104084): Remove this |
639 | // attribute. |
#[allow(dead_code)]
641 | const _: () = { |
642 | let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); |
643 | // Make sure that `deref_unchecked` is `const`. |
644 | // |
645 | // SAFETY: The `Align<_, AU64>` guarantees proper alignment. |
646 | let au64 = unsafe { x.t.deref_unchecked() }; |
647 | match au64 { |
648 | AU64(123) => {} |
649 | _ => const_unreachable!(), |
650 | } |
651 | }; |
652 | } |
653 | |
654 | #[test] |
655 | fn test_unalign_update() { |
656 | let mut u = Unalign::new(AU64(123)); |
657 | u.update(|a| a.0 += 1); |
658 | assert_eq!(u.get(), AU64(124)); |
659 | |
660 | // Test that, even if the callback panics, the original is still |
661 | // correctly overwritten. Use a `Box` so that Miri is more likely to |
662 | // catch any unsoundness (which would likely result in two `Box`es for |
663 | // the same heap object, which is the sort of thing that Miri would |
664 | // probably catch). |
665 | let mut u = Unalign::new(Box::new(AU64(123))); |
666 | let res = std::panic::catch_unwind(AssertUnwindSafe(|| { |
667 | u.update(|a| { |
668 | a.0 += 1; |
669 | panic!(); |
670 | }) |
671 | })); |
672 | assert!(res.is_err()); |
673 | assert_eq!(u.into_inner(), Box::new(AU64(124))); |
674 | |
675 | // Test the align_of::<T>() == 1 optimization. |
676 | let mut u = Unalign::new([0u8, 1]); |
677 | u.update(|a| a[0] += 1); |
678 | assert_eq!(u.get(), [1u8, 1]); |
679 | } |
680 | |
681 | #[test] |
682 | fn test_unalign_copy_clone() { |
683 | // Test that `Copy` and `Clone` do not cause soundness issues. This test |
684 | // is mainly meant to exercise UB that would be caught by Miri. |
685 | |
// `u.t` is guaranteed not to satisfy `AU64`'s alignment of 8.
687 | let u = ForceUnalign::<_, AU64>::new(Unalign::new(AU64(123))); |
#[allow(clippy::clone_on_copy)]
689 | let v = u.t.clone(); |
690 | let w = u.t; |
691 | assert_eq!(u.t.get(), v.get()); |
692 | assert_eq!(u.t.get(), w.get()); |
693 | assert_eq!(v.get(), w.get()); |
694 | } |
695 | |
696 | #[test] |
697 | fn test_unalign_trait_impls() { |
698 | let zero = Unalign::new(0u8); |
699 | let one = Unalign::new(1u8); |
700 | |
701 | assert!(zero < one); |
702 | assert_eq!(PartialOrd::partial_cmp(&zero, &one), Some(Ordering::Less)); |
703 | assert_eq!(Ord::cmp(&zero, &one), Ordering::Less); |
704 | |
705 | assert_ne!(zero, one); |
706 | assert_eq!(zero, zero); |
707 | assert!(!PartialEq::eq(&zero, &one)); |
708 | assert!(PartialEq::eq(&zero, &zero)); |
709 | |
710 | fn hash<T: Hash>(t: &T) -> u64 { |
711 | let mut h = std::collections::hash_map::DefaultHasher::new(); |
712 | t.hash(&mut h); |
713 | h.finish() |
714 | } |
715 | |
716 | assert_eq!(hash(&zero), hash(&0u8)); |
717 | assert_eq!(hash(&one), hash(&1u8)); |
718 | |
assert_eq!(format!("{:?}", zero), format!("{:?}", 0u8));
assert_eq!(format!("{:?}", one), format!("{:?}", 1u8));
assert_eq!(format!("{}", zero), format!("{}", 0u8));
assert_eq!(format!("{}", one), format!("{}", 1u8));
723 | } |
724 | |
725 | #[test] |
#[allow(clippy::as_conversions)]
727 | fn test_maybe_uninit() { |
728 | // int |
729 | { |
730 | let input = 42; |
731 | let uninit = MaybeUninit::new(input); |
732 | // SAFETY: `uninit` is in an initialized state |
733 | let output = unsafe { uninit.assume_init() }; |
734 | assert_eq!(input, output); |
735 | } |
736 | |
737 | // thin ref |
738 | { |
739 | let input = 42; |
740 | let uninit = MaybeUninit::new(&input); |
741 | // SAFETY: `uninit` is in an initialized state |
742 | let output = unsafe { uninit.assume_init() }; |
743 | assert_eq!(&input as *const _, output as *const _); |
744 | assert_eq!(input, *output); |
745 | } |
746 | |
747 | // wide ref |
748 | { |
749 | let input = [1, 2, 3, 4]; |
750 | let uninit = MaybeUninit::new(&input[..]); |
751 | // SAFETY: `uninit` is in an initialized state |
752 | let output = unsafe { uninit.assume_init() }; |
753 | assert_eq!(&input[..] as *const _, output as *const _); |
754 | assert_eq!(input, *output); |
755 | } |
756 | } |
757 | } |
758 | |