| 1 | // Copyright 2023 The Fuchsia Authors |
| 2 | // |
| 3 | // Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0 |
| 4 | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
| 5 | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
| 6 | // This file may not be copied, modified, or distributed except according to |
| 7 | // those terms. |
| 8 | |
| 9 | #[macro_use ] |
| 10 | mod macros; |
| 11 | |
| 12 | #[doc (hidden)] |
| 13 | pub mod macro_util; |
| 14 | |
| 15 | use core::{ |
| 16 | marker::PhantomData, |
| 17 | mem::{self, ManuallyDrop}, |
| 18 | num::NonZeroUsize, |
| 19 | ptr::NonNull, |
| 20 | }; |
| 21 | |
| 22 | use super::*; |
| 23 | |
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
///
/// Like `PhantomData` itself, this type is zero-sized.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}
| 34 | |
| 35 | impl<T: ?Sized> Default for SendSyncPhantomData<T> { |
| 36 | fn default() -> SendSyncPhantomData<T> { |
| 37 | SendSyncPhantomData(PhantomData) |
| 38 | } |
| 39 | } |
| 40 | |
| 41 | impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> { |
| 42 | fn eq(&self, other: &Self) -> bool { |
| 43 | self.0.eq(&other.0) |
| 44 | } |
| 45 | } |
| 46 | |
| 47 | impl<T: ?Sized> Eq for SendSyncPhantomData<T> {} |
| 48 | |
/// A type whose value can be converted to a raw memory address.
///
/// Implemented below for references, raw pointers, and `NonNull`.
pub(crate) trait AsAddress {
    /// Returns the address of `self` as a `usize`. For wide pointers, only
    /// the data address is returned; the metadata is discarded.
    fn addr(self) -> usize;
}
| 52 | |
| 53 | impl<T: ?Sized> AsAddress for &T { |
| 54 | #[inline (always)] |
| 55 | fn addr(self) -> usize { |
| 56 | let ptr: *const T = self; |
| 57 | AsAddress::addr(self:ptr) |
| 58 | } |
| 59 | } |
| 60 | |
| 61 | impl<T: ?Sized> AsAddress for &mut T { |
| 62 | #[inline (always)] |
| 63 | fn addr(self) -> usize { |
| 64 | let ptr: *const T = self; |
| 65 | AsAddress::addr(self:ptr) |
| 66 | } |
| 67 | } |
| 68 | |
| 69 | impl<T: ?Sized> AsAddress for NonNull<T> { |
| 70 | #[inline (always)] |
| 71 | fn addr(self) -> usize { |
| 72 | AsAddress::addr(self.as_ptr()) |
| 73 | } |
| 74 | } |
| 75 | |
impl<T: ?Sized> AsAddress for *const T {
    #[inline (always)]
    fn addr(self) -> usize {
        // The `cast::<()>()` first converts to a thin pointer, so for wide
        // pointers only the data address (not the metadata) is converted.
        //
        // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `.addr()` instead of `as usize` once it's stable, and get rid of this
        // `allow`. Currently, `as usize` is the only way to accomplish this.
        #[allow (clippy::as_conversions)]
        #[cfg_attr (
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}
| 90 | |
| 91 | impl<T: ?Sized> AsAddress for *mut T { |
| 92 | #[inline (always)] |
| 93 | fn addr(self) -> usize { |
| 94 | let ptr: *const T = self; |
| 95 | AsAddress::addr(self:ptr) |
| 96 | } |
| 97 | } |
| 98 | |
| 99 | /// Validates that `t` is aligned to `align_of::<U>()`. |
| 100 | #[inline (always)] |
| 101 | pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> { |
| 102 | // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in |
| 103 | // turn guarantees that this mod operation will not panic. |
| 104 | #[allow (clippy::arithmetic_side_effects)] |
| 105 | let remainder: usize = t.addr() % mem::align_of::<U>(); |
| 106 | if remainder == 0 { |
| 107 | Ok(()) |
| 108 | } else { |
| 109 | // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`. |
| 110 | // That's only possible if `align_of::<U>() > 1`. |
| 111 | Err(unsafe { AlignmentError::new_unchecked(()) }) |
| 112 | } |
| 113 | } |
| 114 | |
/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
#[cfg_attr(
    kani,
    kani::requires(len <= isize::MAX as usize),
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&p| (len + p) % align.get() == 0),
    // Ensures that we add the minimum required padding.
    kani::ensures(|&p| p < align.get()),
)]
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(padding_needed_for)]
    fn proof() {
        padding_needed_for(kani::any(), kani::any());
    }

    // Conceptually, we want `(align - (len % align)) % align`. Because `align`
    // is a power of two, `len % align == len & (align - 1)`, and the whole
    // computation collapses to a single mask:
    //
    //     !(len - 1) & (align - 1)
    //
    // Subtracting 1 before taking the complement turns "complement of the low
    // bits" into "align minus the low bits", and it also makes the
    // `len % align == 0` case come out as 0 (since `len - 1` then has all of
    // the low bits set, whose complement under the mask is 0). The
    // `wrapping_sub` handles `len == 0`: `0usize.wrapping_sub(1)` is
    // `usize::MAX`, whose complement is 0, so no padding is required — as
    // desired. This equivalence with the naive branching implementation has
    // been verified with an SMT solver over 32- and 64-bit bitvectors for all
    // power-of-two alignments.

    // The subtraction cannot underflow because `align` is non-zero.
    #[allow(clippy::arithmetic_side_effects)]
    let low_bits_mask = align.get() - 1;

    !(len.wrapping_sub(1)) & low_bits_mask
}
| 186 | |
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
#[inline(always)]
#[cfg_attr(
    kani,
    kani::requires(align.is_power_of_two()),
    kani::ensures(|&m| m <= n && m % align.get() == 0),
    // Guarantees that `m` is the *largest* value such that `m % align == 0`.
    kani::ensures(|&m| {
        // If this `checked_add` fails, then the next multiple would wrap
        // around, which trivially satisfies the "largest value" requirement.
        m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true)
    })
)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    #[cfg(kani)]
    #[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)]
    fn proof() {
        round_down_to_next_multiple_of_alignment(kani::any(), kani::any());
    }

    let alignment: usize = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    debug_assert!(alignment.is_power_of_two());

    // For a power-of-two alignment, clearing the low `log2(alignment)` bits of
    // `n` rounds it down to a multiple of `alignment`. The subtraction cannot
    // underflow because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let keep_high_bits: usize = !(alignment - 1);
    n & keep_high_bits
}
| 225 | |
/// Returns the greater of `a` and `b`; on a tie, returns `a`.
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() > a.get() {
        b
    } else {
        a
    }
}
| 233 | |
/// Returns the lesser of `a` and `b`; on a tie, returns `a`.
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if b.get() < a.get() {
        b
    } else {
        a
    }
}
| 241 | |
/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // copy_nonoverlapping [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    //
    // [1] https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
    //
    // Fix: the original passed `count:src.len()` — an editor inlay
    // (parameter-name) hint captured into the source, which is not valid Rust;
    // the third argument is just `src.len()`.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}
| 264 | |
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline (always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    // Compile-time guard: a transmute is only meaningful between types of
    // equal size.
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    // A `#[repr(C)]` union whose two fields share a starting offset, enabling
    // the write-as-`Src`, read-as-`Dst` reinterpretation below.
    #[repr (C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because — by contract on the caller — `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //   Effectively, writing to and then reading from a union with the C
    //   representation is analogous to a transmute from the type used for
    //   writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
| 301 | |
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    // Total size required to store a `T` with metadata `meta`; `None`
    // indicates overflow, which we surface as an allocation failure rather
    // than a panic.
    let size = match meta.size_for_metadata(T::LAYOUT) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // TODO(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized type [1].
        //
        // [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //   This function is unsafe because undefined behavior can result if
        //   the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        // Note: `align` (bound above from `T::LAYOUT.align.get()`) is reused
        // here; the previous revision redundantly re-derived an identical,
        // shadowing binding.
        //
        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        #[allow(clippy::useless_transmute)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    // Attach the pointer metadata (e.g. a slice length) to form a raw `*mut T`.
    let ptr = T::raw_from_ptr_len(ptr, meta);

    // TODO(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
| 399 | |
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // Polyfill for `NonNull::slice_from_raw_parts`, which was stabilized in
    // Rust 1.70; needed until our MSRV reaches that version.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: never exercised in nightly coverage runs, since
        // the stabilized inherent method shadows this polyfill there.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let raw: *mut [T] = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `raw` carries the address of `data`, which is non-null.
            unsafe { NonNull::new_unchecked(raw) }
        }
    }

    // Polyfill for unchecked arithmetic (e.g. `usize::unchecked_sub`), needed
    // until those methods are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, callers resolve to the inherent method rather than
    // to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;
    }

    impl NumExt for usize {
        // NOTE on coverage: never exercised in nightly coverage runs, since
        // the stabilized inherent method shadows this polyfill there.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            if let Some(difference) = self.checked_sub(rhs) {
                difference
            } else {
                // SAFETY: The caller promises that the subtraction will not
                // underflow.
                unsafe { core::hint::unreachable_unchecked() }
            }
        }
    }
}
| 481 | |
#[cfg (test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive (Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        // Zero-length array: contributes `A`'s alignment but occupies no bytes.
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        // Resets `t` to its default value in place.
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        // Wraps `t`, imposing `A`'s alignment on the resulting value.
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr (C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        // Wraps `t`, guaranteeing it is misaligned relative to `A`.
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive (
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr (C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr (
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            // Forward to the inner `u64`'s `Display` impl.
            Display::fmt(&self.0, f)
        }
    }

    // A generic `repr(C)` pair with a possibly-unsized second field.
    // NOTE(review): exact intended use inferred from shape (exercising derives
    // on generic/`?Sized` types) — confirm against the test suite.
    #[derive (Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
    #[repr (C)]
    pub(crate) struct Nested<T, U: ?Sized> {
        _t: T,
        _u: U,
    }
}
| 573 | |
#[cfg (test)]
mod tests {
    use super::*;

    #[test ]
    fn test_round_down_to_next_multiple_of_alignment() {
        // Reference implementation: integer division followed by
        // multiplication rounds `n` down to a multiple of `align`.
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        // Exhaustively compare the bit-twiddling implementation against the
        // reference for small power-of-two alignments.
        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})" , n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test ]
    #[should_panic ]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        // A non-power-of-two alignment (3) should trip the `debug_assert!` in
        // `round_down_to_next_multiple_of_alignment`. NOTE(review): that
        // assertion is gated on the
        // `zerocopy_panic_in_const_and_vec_try_reserve_1_57_0` cfg; this test
        // assumes the cfg is enabled in test builds on Rust >= 1.57 — confirm
        // against the build script.
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
}
| 602 | |