| 1 | // Copyright 2023 The Fuchsia Authors |
| 2 | // |
| 3 | // Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0 |
| 4 | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
| 5 | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
| 6 | // This file may not be copied, modified, or distributed except according to |
| 7 | // those terms. |
| 8 | |
| 9 | #[macro_use ] |
| 10 | mod macros; |
| 11 | |
| 12 | #[doc (hidden)] |
| 13 | pub mod macro_util; |
| 14 | |
| 15 | use core::{ |
| 16 | cell::UnsafeCell, |
| 17 | marker::PhantomData, |
| 18 | mem::{self, ManuallyDrop, MaybeUninit}, |
| 19 | num::{NonZeroUsize, Wrapping}, |
| 20 | ptr::NonNull, |
| 21 | }; |
| 22 | |
| 23 | use crate::{ |
| 24 | error::AlignmentError, |
| 25 | pointer::invariant::{self, Invariants}, |
| 26 | Unalign, |
| 27 | }; |
| 28 | |
| 29 | /// A type which has the same layout as the type it wraps. |
| 30 | /// |
| 31 | /// # Safety |
| 32 | /// |
| 33 | /// `T: TransparentWrapper` implies that `T` has the same size as [`T::Inner`]. |
| 34 | /// Further, `T: TransparentWrapper<I>` implies that: |
| 35 | /// - If `T::UnsafeCellVariance = Covariant`, then `T` has `UnsafeCell`s |
| 36 | /// covering the same byte ranges as `T::Inner`. |
| 37 | /// - If a `T` pointer satisfies the alignment invariant `I::Alignment`, then |
| 38 | /// that same pointer, cast to `T::Inner`, satisfies the alignment invariant |
| 39 | /// `<T::AlignmentVariance as AlignmentVariance<I::Alignment>>::Applied`. |
| 40 | /// - If a `T` pointer satisfies the validity invariant `I::Validity`, then that |
| 41 | /// same pointer, cast to `T::Inner`, satisfies the validity invariant |
| 42 | /// `<T::ValidityVariance as ValidityVariance<I::Validity>>::Applied`. |
| 43 | /// |
| 44 | /// [`T::Inner`]: TransparentWrapper::Inner |
| 45 | /// [`UnsafeCell`]: core::cell::UnsafeCell |
| 46 | /// [`T::AlignmentVariance`]: TransparentWrapper::AlignmentVariance |
| 47 | /// [`T::ValidityVariance`]: TransparentWrapper::ValidityVariance |
#[doc(hidden)]
pub unsafe trait TransparentWrapper<I: Invariants> {
    /// The wrapped inner type; `Self` has the same size as `Self::Inner`.
    type Inner: ?Sized;

    /// Set to [`Covariant`] iff `Self` has `UnsafeCell`s covering the same
    /// byte ranges as `Self::Inner`; otherwise set to [`Invariant`], which
    /// makes no claim.
    type UnsafeCellVariance;
    /// Maps the alignment invariant `I::Alignment` of a `Self` pointer to the
    /// alignment invariant satisfied by the same pointer cast to
    /// `Self::Inner`.
    type AlignmentVariance: AlignmentVariance<I::Alignment>;
    /// Maps the validity invariant `I::Validity` of a `Self` pointer to the
    /// validity invariant satisfied by the same pointer cast to
    /// `Self::Inner`.
    type ValidityVariance: ValidityVariance<I::Validity>;

    /// Casts a wrapper pointer to an inner pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_into_inner(ptr: *mut Self) -> *mut Self::Inner;

    /// Casts an inner pointer to a wrapper pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_from_inner(ptr: *mut Self::Inner) -> *mut Self;
}
| 72 | |
/// Maps an alignment invariant `I` on a wrapper type to the alignment
/// invariant `Applied` which holds for the same pointer cast to the wrapped
/// inner type.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait AlignmentVariance<I: invariant::Alignment> {
    /// The alignment invariant which holds for the inner type.
    type Applied: invariant::Alignment;
}
| 78 | |
/// Maps a validity invariant `I` on a wrapper type to the validity invariant
/// `Applied` which holds for the same pointer cast to the wrapped inner type.
#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait ValidityVariance<I: invariant::Validity> {
    /// The validity invariant which holds for the inner type.
    type Applied: invariant::Validity;
}
| 84 | |
/// A variance marker: the wrapper's invariant carries over to the inner type
/// unchanged.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Covariant {}

impl<I: invariant::Alignment> AlignmentVariance<I> for Covariant {
    // The inner type inherits the wrapper's alignment invariant verbatim.
    type Applied = I;
}

impl<I: invariant::Validity> ValidityVariance<I> for Covariant {
    // The inner type inherits the wrapper's validity invariant verbatim.
    type Applied = I;
}
| 96 | |
/// A variance marker: the wrapper's invariant does *not* carry over to the
/// inner type; the invariant is weakened to `invariant::Any`.
#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Invariant {}

impl<I: invariant::Alignment> AlignmentVariance<I> for Invariant {
    // Regardless of the wrapper's alignment invariant, nothing is guaranteed
    // about the inner type's alignment.
    type Applied = invariant::Any;
}

impl<I: invariant::Validity> ValidityVariance<I> for Invariant {
    // Regardless of the wrapper's validity invariant, nothing is guaranteed
    // about the inner type's validity.
    type Applied = invariant::Any;
}
| 108 | |
// SAFETY:
// - Per [1], `MaybeUninit<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
//   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
//   `T`
unsafe impl<T, I: Invariants> TransparentWrapper<I> for MaybeUninit<T> {
    type Inner = T;

    // SAFETY: `MaybeUninit<T>` has `UnsafeCell`s covering the same byte ranges
    // as `Inner = T`. This is not explicitly documented, but it can be
    // inferred. Per [1] in the preceding safety comment, `MaybeUninit<T>` has
    // the same size as `T`. Further, note the signature of
    // `MaybeUninit::assume_init_ref` [2]:
    //
    //   pub unsafe fn assume_init_ref(&self) -> &T
    //
    // If the argument `&MaybeUninit<T>` and the returned `&T` had `UnsafeCell`s
    // at different offsets, this would be unsound. Its existence is proof that
    // this is not the case.
    //
    // [2] https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#method.assume_init_ref
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `MaybeUninit<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and
    //   ABI as `T`.
    type AlignmentVariance = Covariant;
    // SAFETY: `MaybeUninit` has no validity invariants. Thus, a valid
    // `MaybeUninit<T>` is not necessarily a valid `T`, so the validity
    // invariant must be weakened to `invariant::Any` — which setting
    // `ValidityVariance = Invariant` accomplishes.
    type ValidityVariance = Invariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut MaybeUninit<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut MaybeUninit<T> {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<MaybeUninit<T>>()
    }
}
| 164 | |
// SAFETY:
// - Per [1], `ManuallyDrop<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for ManuallyDrop<T> {
    type Inner = T;

    // SAFETY: Per [1], `ManuallyDrop<T>` has `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`. As a consequence, it has no effect on the assumptions that the
    //   compiler makes about its contents.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `ManuallyDrop<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same bit
    // validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut ManuallyDrop<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        //
        // An `as` cast is used rather than `pointer::cast` because `cast`
        // requires a `Sized` target type, and here `T: ?Sized`.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut ManuallyDrop<T> {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        //
        // An `as` cast is used rather than `pointer::cast` because `cast`
        // requires a `Sized` target type, and here `T: ?Sized`.
        #[allow(clippy::as_conversions)]
        return ptr as *mut ManuallyDrop<T>;
    }
}
| 219 | |
// SAFETY:
// - Per [1], `Wrapping<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
//
//   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Wrapping<T> {
    // `Wrapping<T>`'s single public field has type `T`.
    type Inner = T;

    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`. Since its
    // single field (of type `T`) is public, it would be a breaking change to
    // add or remove fields. Thus, we know that `Wrapping<T>` contains a `T` (as
    // opposed to just having the same size and alignment as `T`) with no pre-
    // or post-padding. Thus, `Wrapping<T>` must have `UnsafeCell`s covering the
    // same byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`, and thus has
    // the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/core/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: `Wrapping<T>` has only one field, which is `pub` [2]. We are also
    // guaranteed per [1] (from the comment above) that `Wrapping<T>` has the
    // same layout as `T`. The only way for both of these to be true
    // simultaneously is for `Wrapping<T>` to have the same bit validity as `T`.
    // In particular, in order to change the bit validity, one of the following
    // would need to happen:
    // - `Wrapping` could change its `repr`, but this would violate the layout
    //   guarantee.
    // - `Wrapping` could add or change its fields, but this would be a
    //   stability-breaking change.
    //
    // [2] https://doc.rust-lang.org/core/num/struct.Wrapping.html
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Wrapping<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Wrapping<T> {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Wrapping<T>>()
    }
}
| 281 | |
// SAFETY:
// - Per [1], `UnsafeCell<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for UnsafeCell<T> {
    type Inner = T;

    // SAFETY: Since we set this to `Invariant`, we make no safety claims.
    type UnsafeCellVariance = Invariant;

    // SAFETY: Per [1] (from comment on impl), `UnsafeCell<T>` has the same
    // representation as `T`, and thus has the same alignment as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1], `UnsafeCell<T>` has the same bit validity as `T`.
    // Technically the term "representation" doesn't guarantee this, but the
    // subsequent sentence in the documentation makes it clear that this is the
    // intention.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
    //
    //   `UnsafeCell<T>` has the same in-memory representation as its inner type
    //   `T`. A consequence of this guarantee is that it is possible to convert
    //   between `T` and `UnsafeCell<T>`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut UnsafeCell<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        //
        // An `as` cast is used rather than `pointer::cast` because `cast`
        // requires a `Sized` target type, and here `T: ?Sized`.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut UnsafeCell<T> {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        //
        // An `as` cast is used rather than `pointer::cast` because `cast`
        // requires a `Sized` target type, and here `T: ?Sized`.
        #[allow(clippy::as_conversions)]
        return ptr as *mut UnsafeCell<T>;
    }
}
| 332 | |
// SAFETY: `Unalign<T>` promises to have the same size as `T`.
//
// See inline comments for other safety justifications.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Unalign<T> {
    type Inner = T;

    // SAFETY: `Unalign<T>` promises to have `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    type UnsafeCellVariance = Covariant;

    // SAFETY: `Unalign<T>` promises to have alignment 1 regardless of `T`'s
    // alignment. Thus, an aligned pointer to `Unalign<T>` is not necessarily
    // an aligned pointer to `T`.
    type AlignmentVariance = Invariant;

    // SAFETY: `Unalign<T>` promises to have the same validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Unalign<T>) -> *mut T {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Unalign<T> {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Unalign<T>>()
    }
}
| 369 | |
/// Implements `TransparentWrapper` for an atomic type.
///
/// # Safety
///
/// The caller promises that `$atomic` is an atomic type whose native equivalent
/// is `$native`.
#[cfg(all(
    zerocopy_target_has_atomics_1_60_0,
    any(
        target_has_atomic = "8",
        target_has_atomic = "16",
        target_has_atomic = "32",
        target_has_atomic = "64",
        target_has_atomic = "ptr"
    )
))]
macro_rules! unsafe_impl_transparent_wrapper_for_atomic {
    // Base case: no atomic types left to implement.
    ($(#[$attr:meta])* $(,)?) => {};
    // Recursive case: implement for the first `$atomic [$native]` pair, then
    // recurse on the rest.
    ($(#[$attr:meta])* $atomic:ty [$native:ty], $($atomics:ty [$natives:ty]),* $(,)?) => {
        $(#[$attr])*
        // SAFETY: See safety comment in next match arm.
        unsafe impl<I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
        unsafe_impl_transparent_wrapper_for_atomic!($(#[$attr])* $($atomics [$natives],)*);
    };
    // Generic case: implement for an atomic type with one type parameter
    // (e.g., `AtomicPtr<T>`).
    ($(#[$attr:meta])* $tyvar:ident => $atomic:ty [$native:ty]) => {
        // We implement for `$atomic` and set `Inner = $native`. The caller has
        // promised that `$atomic` and `$native` are an atomic type and its
        // native counterpart, respectively. Per [1], `$atomic` and `$native`
        // have the same size.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        $(#[$attr])*
        unsafe impl<$tyvar, I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
    };
    // Internal helper: the body shared by every generated impl.
    (@inner $atomic:ty [$native:ty]) => {
        type Inner = UnsafeCell<$native>;

        // SAFETY: It is "obvious" that each atomic type contains a single
        // `UnsafeCell` that covers all bytes of the type, but we can also prove
        // it:
        // - Since `$atomic` provides an API which permits loading and storing
        //   values of type `$native` via a `&self` (shared) reference, *some*
        //   interior mutation must be happening, and interior mutation can only
        //   happen via `UnsafeCell`. Further, there must be enough bytes in
        //   `$atomic` covered by an `UnsafeCell` to hold every possible value
        //   of `$native`.
        // - Per [1], `$atomic` has the same size as `$native`. This on its own
        //   isn't enough: it would still be possible for `$atomic` to store
        //   `$native` using a compact representation (for `$native` types for
        //   which some bit patterns are illegal). However, this is ruled out by
        //   the fact that `$atomic` has the same bit validity as `$native` [1].
        //   Thus, we can conclude that every byte of `$atomic` must be covered
        //   by an `UnsafeCell`.
        //
        // Thus, every byte of `$atomic` is covered by an `UnsafeCell`, and we
        // set `type Inner = UnsafeCell<$native>`. Thus, `Self` and
        // `Self::Inner` have `UnsafeCell`s covering the same byte ranges.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type UnsafeCellVariance = crate::util::Covariant;

        // SAFETY: No safety justification is required for an invariant
        // variance.
        type AlignmentVariance = crate::util::Invariant;

        // SAFETY: Per [1], all atomic types have the same bit validity as their
        // native counterparts. The caller has promised that `$atomic` and
        // `$native` are an atomic type and its native counterpart,
        // respectively.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type ValidityVariance = crate::util::Covariant;

        #[inline(always)]
        fn cast_into_inner(ptr: *mut $atomic) -> *mut UnsafeCell<$native> {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<UnsafeCell<$native>>()
        }

        #[inline(always)]
        fn cast_from_inner(ptr: *mut UnsafeCell<$native>) -> *mut $atomic {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<$atomic>()
        }
    };
}
| 475 | |
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
///
/// `PhantomData<T>` is zero-sized, so `SendSyncPhantomData<T>` is too.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}
| 486 | |
| 487 | impl<T: ?Sized> Default for SendSyncPhantomData<T> { |
| 488 | fn default() -> SendSyncPhantomData<T> { |
| 489 | SendSyncPhantomData(PhantomData) |
| 490 | } |
| 491 | } |
| 492 | |
| 493 | impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> { |
| 494 | fn eq(&self, other: &Self) -> bool { |
| 495 | self.0.eq(&other.0) |
| 496 | } |
| 497 | } |
| 498 | |
| 499 | impl<T: ?Sized> Eq for SendSyncPhantomData<T> {} |
| 500 | |
/// A pointer-like type whose address can be extracted as a `usize`.
pub(crate) trait AsAddress {
    /// Returns the address of `self` as a `usize`, discarding provenance.
    fn addr(self) -> usize;
}
| 504 | |
| 505 | impl<T: ?Sized> AsAddress for &T { |
| 506 | #[inline (always)] |
| 507 | fn addr(self) -> usize { |
| 508 | let ptr: *const T = self; |
| 509 | AsAddress::addr(self:ptr) |
| 510 | } |
| 511 | } |
| 512 | |
| 513 | impl<T: ?Sized> AsAddress for &mut T { |
| 514 | #[inline (always)] |
| 515 | fn addr(self) -> usize { |
| 516 | let ptr: *const T = self; |
| 517 | AsAddress::addr(self:ptr) |
| 518 | } |
| 519 | } |
| 520 | |
impl<T: ?Sized> AsAddress for NonNull<T> {
    #[inline(always)]
    fn addr(self) -> usize {
        // Delegate to the `*mut T` implementation via `NonNull::as_ptr`.
        AsAddress::addr(self.as_ptr())
    }
}
| 527 | |
// The base implementation: all other `AsAddress` impls eventually delegate
// here.
impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `.addr()` instead of `as usize` once it's stable, and get rid of this
        // `allow`. Currently, `as usize` is the only way to accomplish this.
        //
        // The `cast::<()>()` first discards the (possibly wide) metadata so
        // that only the address component is converted.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}
| 542 | |
| 543 | impl<T: ?Sized> AsAddress for *mut T { |
| 544 | #[inline (always)] |
| 545 | fn addr(self) -> usize { |
| 546 | let ptr: *const T = self; |
| 547 | AsAddress::addr(self:ptr) |
| 548 | } |
| 549 | } |
| 550 | |
| 551 | /// Validates that `t` is aligned to `align_of::<U>()`. |
| 552 | #[inline (always)] |
| 553 | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { |
| 554 | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in |
| 555 | // turn guarantees that this mod operation will not panic. |
| 556 | #[allow (clippy::arithmetic_side_effects)] |
| 557 | let remainder: usize = t.addr() % mem::align_of::<U>(); |
| 558 | if remainder == 0 { |
| 559 | Ok(()) |
| 560 | } else { |
| 561 | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. |
| 562 | // That's only possible if `align_of::<U>() > 1`. |
| 563 | Err(unsafe { AlignmentError::new_unchecked(()) }) |
| 564 | } |
| 565 | } |
| 566 | |
/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    // Abstractly, we want `(align - (len % align)) % align` — the distance
    // from `len` up to the next multiple of `align`, with multiples of `align`
    // themselves needing no padding.
    //
    // Because `align` is a power of two, `len % align` is `len & (align - 1)`,
    // and the whole expression collapses to `-len & (align - 1)` in two's
    // complement arithmetic (`len.wrapping_neg()` is exactly
    // `!(len.wrapping_sub(1))`):
    // - `-len ≡ align - len (mod align)`, so the low bits of `-len` are the
    //   needed padding whenever `len % align != 0`.
    // - When `len % align == 0`, the low bits of `-len` are all zero, giving
    //   `0` as desired.
    //
    // The equivalence of this bit trick with the naive branching formula has
    // been verified with an SMT solver over 32- and 64-bit bitvectors for all
    // power-of-two values of `align`.

    // This subtraction is guaranteed not to underflow because `align` is
    // nonzero.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = align.get() - 1;

    len.wrapping_neg() & mask
}
| 624 | |
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
#[inline(always)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    let align = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    debug_assert!(align.is_power_of_two());

    // For a power-of-two `align`, clearing the bits of `n` below `align`'s
    // single set bit rounds `n` down to a multiple of `align`. The subtraction
    // cannot underflow because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let rounded = n & !(align - 1);
    rounded
}
| 646 | |
/// Returns the larger of `a` and `b` (`a` if they are equal).
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() >= b.get() {
        a
    } else {
        b
    }
}
| 654 | |
/// Returns the smaller of `a` and `b` (`a` if they are equal).
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() <= b.get() {
        a
    } else {
        b
    }
}
| 662 | |
/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // copy_nonoverlapping [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    //
    // [1] https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
    //
    // Note: the original code passed `count:src.len()`, which is invalid Rust
    // (a leaked parameter-name inlay hint); the `count` argument is simply
    // `src.len()`.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}
| 685 | |
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    // A `#[repr(C)]` union whose fields are transparent wrappers around `Src`
    // and `Dst`; writing the `src` field and reading the `dst` field performs
    // the transmute.
    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because — by contract on the caller — `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //   Effectively, writing to and then reading from a union with the C
    //   representation is analogous to a transmute from the type used for
    //   writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
| 722 | |
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)" ]
#[cfg (feature = "alloc" )]
#[inline ]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, crate::error::AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    use crate::error::AllocError;
    use crate::PointerMetadata;
    use core::alloc::Layout;

    // Compute the total byte size of a `T` carrying metadata `meta`; bail out
    // (rather than panic) if the size computation overflows.
    let size = match meta.size_for_metadata(T::LAYOUT) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow (clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // TODO(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout has non-zero size [1].
        //
        // [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //   This function is unsafe because undefined behavior can result if
        //   the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        // `allocate` signals failure by returning null; surface that as an
        // `AllocError` instead of aborting.
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        // Zero-sized layout: don't allocate; manufacture a well-aligned
        // dangling pointer instead, as `Box` requires for ZSTs.
        let align = T::LAYOUT.align.get();
        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        #[allow (clippy::useless_transmute)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    // Attach the pointer metadata (e.g. a slice length) to produce a raw,
    // possibly-wide `*mut T`.
    let ptr = T::raw_from_ptr_len(ptr, meta);

    // TODO(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow (clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
| 824 | |
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is 1.70, remove this.
    #[allow (unused)]
    pub(crate) trait NonNullExt<T> {
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr (coverage_nightly, coverage(off))]
        #[inline (always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for `Self::unchecked_sub` that we can use until methods like
    // `usize::unchecked_sub` are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `x.unchecked_sub()` resolves to the inherent method
    // rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is high enough, remove this.
    #[allow (unused)]
    pub(crate) trait NumExt {
        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;
    }

    impl NumExt for usize {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr (coverage_nightly, coverage(off))]
        #[inline (always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            // `checked_sub` + `unreachable_unchecked` compiles down to an
            // unchecked subtraction while remaining written in safe arithmetic.
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
}
| 900 | |
#[cfg (test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    ///
    /// The zero-sized `[A; 0]` field contributes `A`'s alignment to the struct
    /// without taking up any space.
    #[derive (Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        // Resets `t` to its default value in place.
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr (C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive (
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr (C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr (coverage_nightly, coverage(off))]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }

    // A struct for exercising derives on generic, possibly-unsized compositions
    // of types.
    #[derive (Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
    #[repr (C)]
    pub(crate) struct Nested<T, U: ?Sized> {
        _t: T,
        _u: U,
    }
}
| 989 | |
#[cfg (test)]
mod tests {
    use super::*;

    #[test ]
    fn test_round_down_to_next_multiple_of_alignment() {
        // Reference implementation: rounding `n` down to a multiple of `align`
        // is integer division followed by multiplication.
        fn reference(n: usize, align: NonZeroUsize) -> usize {
            (n / align.get()) * align.get()
        }

        for pow in 0..5u32 {
            // Exercise each power-of-two alignment 1, 2, 4, 8, 16.
            let align = NonZeroUsize::new(1usize << pow).unwrap();
            for n in 0..256 {
                let want = reference(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test ]
    #[should_panic ]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        // A non-power-of-two alignment (3) must trigger a panic.
        let non_pow2 = NonZeroUsize::new(3).unwrap();
        round_down_to_next_multiple_of_alignment(0, non_pow2);
    }
}
| 1018 | |
#[cfg (kani)]
mod proofs {
    use super::*;

    #[kani::proof]
    fn prove_round_down_to_next_multiple_of_alignment() {
        // Model: rounding down to a multiple of `align` is integer division
        // followed by multiplication.
        fn model_impl(n: usize, align: NonZeroUsize) -> usize {
            assert!(align.get().is_power_of_two());
            let mul = n / align.get();
            mul * align.get()
        }

        let align: NonZeroUsize = kani::any();
        // The function under test only supports power-of-two alignments.
        kani::assume(align.get().is_power_of_two());
        let n: usize = kani::any();

        let expected = model_impl(n, align);
        let actual = round_down_to_next_multiple_of_alignment(n, align);
        assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({}, {})" , n, align);
    }

    // Restricted to nightly since we use the unstable `usize::next_multiple_of`
    // in our model implementation.
    #[cfg (__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)]
    #[kani::proof]
    fn prove_padding_needed_for() {
        // Model: padding is the distance from `len` up to the next multiple of
        // `align`.
        fn model_impl(len: usize, align: NonZeroUsize) -> usize {
            let padded = len.next_multiple_of(align.get());
            let padding = padded - len;
            padding
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let len: usize = kani::any();
        // Constrain `len` to valid Rust lengths, since our model implementation
        // isn't robust to overflow.
        kani::assume(len <= isize::MAX as usize);
        // Bound `align` so `len.next_multiple_of(align)` cannot overflow in the
        // model; real alignments are far below this limit anyway.
        kani::assume(align.get() < 1 << 29);

        let expected = model_impl(len, align);
        let actual = padding_needed_for(len, align);
        assert_eq!(expected, actual, "padding_needed_for({}, {})" , len, align);

        // Sanity checks: the padded length is a multiple of `align` and never
        // rounds below `len`'s multiple.
        let padded_len = actual + len;
        assert_eq!(padded_len % align, 0);
        assert!(padded_len / align >= len / align);
    }
}
| 1068 | |