// Copyright 2018 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

// After updating the following doc comment, make sure to run the following
// command to update `README.md` based on its contents:
//
//   ./generate-readme.sh > README.md

//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
//! Fill out our [user survey][user-survey]!</span>*
//!
//! ***<span style="font-size: 140%">Fast, safe, <span
//! style="color:red;">compile error</span>. Pick two.</span>***
//!
//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
//! so you don't have to.
//!
//! # Overview
//!
//! Zerocopy provides four core marker traits, each of which can be derived
//! (e.g., `#[derive(FromZeroes)]`):
//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
//!   instance of a type
//! - [`FromBytes`] indicates that a type may safely be converted from an
//!   arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
//!   sequence
//! - [`Unaligned`] indicates that a type's alignment requirement is 1
//!
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
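//!
//! For example, a type which derives these traits can be parsed from and
//! serialized as raw bytes. A minimal sketch (the `PacketHeader` type and its
//! fields are hypothetical, and the `derive` feature is assumed):
//!
//! ```
//! # use zerocopy::{AsBytes, FromBytes};
//! # use zerocopy_derive::*;
//! #
//! #[derive(FromZeroes, FromBytes, AsBytes)]
//! #[repr(C)]
//! struct PacketHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//! }
//!
//! // Parse a `PacketHeader` from raw bytes...
//! let bytes = [0u8, 80, 1, 187];
//! let header = PacketHeader::read_from(&bytes[..]).unwrap();
//! // ...and view it as raw bytes again, with no copying.
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```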
//!
//! Zerocopy also provides byte-order aware integer types that support these
//! conversions; see the [`byteorder`] module. These types are especially useful
//! for network parsing.
//!
//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
//!
//! # Cargo Features
//!
//! - **`alloc`**
//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
//!   the `alloc` crate is added as a dependency, and some allocation-related
//!   functionality is added.
//!
//! - **`byteorder`** (enabled by default)
//!   Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
//!   The `byteorder` module provides byte order-aware equivalents of the
//!   multi-byte primitive numerical types. Unlike their primitive equivalents,
//!   the types in this module have no alignment requirement and support byte
//!   order conversions. This can be useful in handling file formats, network
//!   packet layouts, etc. which don't provide alignment guarantees and which
//!   may use a byte order different from that of the execution platform. See
//!   the example after this list.
//!
//! - **`derive`**
//!   Provides derives for the core marker traits via the `zerocopy-derive`
//!   crate. These derives are re-exported from `zerocopy`, so it is not
//!   necessary to depend on `zerocopy-derive` directly.
//!
//!   However, you may experience better compile times if you instead directly
//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
//!   since doing so will allow Rust to compile these crates in parallel. To do
//!   so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
//!
//!   ```toml
//!   [dependencies]
//!   zerocopy = "0.X"
//!   zerocopy-derive = "0.X"
//!   ```
//!
//! - **`simd`**
//!   When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
//!   `AsBytes` impls are emitted for all stable SIMD types which exist on the
//!   target platform. Note that the layout of SIMD types is not yet stabilized,
//!   so these impls may be removed in the future if layout changes make them
//!   invalid. For more information, see the Unsafe Code Guidelines Reference
//!   page on the [layout of packed SIMD vectors][simd-layout].
//!
//! - **`simd-nightly`**
//!   Enables the `simd` feature and adds support for SIMD types which are only
//!   available on nightly. Since these types are unstable, support for any type
//!   may be removed at any point in the future.
//!
//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
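//!
//! As an illustration of the `byteorder` feature, the following sketch stores
//! a big-endian `u32` with no alignment requirement (the default `byteorder`
//! feature is assumed to be enabled):
//!
//! ```
//! # use zerocopy::AsBytes;
//! # use zerocopy::byteorder::{BigEndian, U32};
//! // `U32<BigEndian>` has alignment 1, so it may live at any byte offset.
//! let n: U32<BigEndian> = U32::new(0xDEADBEEF);
//! assert_eq!(n.get(), 0xDEADBEEF);
//! // The in-memory representation is big-endian regardless of the platform.
//! assert_eq!(n.as_bytes(), &[0xDE, 0xAD, 0xBE, 0xEF][..]);
//! ```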
//!
//! # Security Ethos
//!
//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current memory
//! model, and *any future memory model*. We ensure this by:
//! - **...not 'guessing' about Rust's semantics.**
//!   We annotate `unsafe` code with a precise rationale for its soundness that
//!   cites a relevant section of Rust's official documentation. When Rust's
//!   documented semantics are unclear, we work with the Rust Operational
//!   Semantics Team to clarify Rust's documentation.
//! - **...rigorously testing our implementation.**
//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
//!   array of supported target platforms of varying endianness and pointer
//!   width, and across both current and experimental memory models of Rust.
//! - **...formally proving the correctness of our implementation.**
//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
//!   correctness.
//!
//! For more information, see our full [soundness policy].
//!
//! [Miri]: https://github.com/rust-lang/miri
//! [Kani]: https://github.com/model-checking/kani
//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
//!
//! # Relationship to Project Safe Transmute
//!
//! [Project Safe Transmute] is an official initiative of the Rust Project to
//! develop language-level support for safer transmutation. The Project consults
//! with crates like zerocopy to identify aspects of safer transmutation that
//! would benefit from compiler support, and has developed an [experimental,
//! compiler-supported analysis][mcp-transmutability] which determines whether,
//! for a given type, any value of that type may be soundly transmuted into
//! another type. Once this functionality is sufficiently mature, zerocopy
//! intends to replace its internal transmutability analysis (implemented by our
//! custom derives) with the compiler-supported one. This change will likely be
//! an implementation detail that is invisible to zerocopy's users.
//!
//! Project Safe Transmute will not replace the need for most of zerocopy's
//! higher-level abstractions. The experimental compiler analysis is a tool for
//! checking the soundness of `unsafe` code, not a tool to avoid writing
//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
//! will still be required in order to provide higher-level abstractions on top
//! of the building block provided by Project Safe Transmute.
//!
//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
//!
//! # MSRV
//!
//! See our [MSRV policy].
//!
//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv

// Sometimes we want to use lints which were added after our MSRV.
// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
// this attribute, any unknown lint would cause a CI failure when testing with
// our MSRV.
#![allow(unknown_lints)]
#![deny(renamed_and_removed_lints)]
#![deny(
    anonymous_parameters,
    deprecated_in_future,
    illegal_floating_point_literal_pattern,
    late_bound_lifetime_arguments,
    missing_copy_implementations,
    missing_debug_implementations,
    missing_docs,
    path_statements,
    patterns_in_fns_without_body,
    rust_2018_idioms,
    trivial_numeric_casts,
    unreachable_pub,
    unsafe_op_in_unsafe_fn,
    unused_extern_crates,
    unused_qualifications,
    variant_size_differences
)]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    deny(fuzzy_provenance_casts, lossy_provenance_casts)
)]
#![deny(
    clippy::all,
    clippy::alloc_instead_of_core,
    clippy::arithmetic_side_effects,
    clippy::as_underscore,
    clippy::assertions_on_result_states,
    clippy::as_conversions,
    clippy::correctness,
    clippy::dbg_macro,
    clippy::decimal_literal_representation,
    clippy::get_unwrap,
    clippy::indexing_slicing,
    clippy::missing_inline_in_public_items,
    clippy::missing_safety_doc,
    clippy::obfuscated_if_else,
    clippy::perf,
    clippy::print_stdout,
    clippy::std_instead_of_core,
    clippy::style,
    clippy::suspicious,
    clippy::todo,
    clippy::undocumented_unsafe_blocks,
    clippy::unimplemented,
    clippy::unnested_or_patterns,
    clippy::unwrap_used,
    clippy::use_debug
)]
#![deny(
    rustdoc::bare_urls,
    rustdoc::broken_intra_doc_links,
    rustdoc::invalid_codeblock_attributes,
    rustdoc::invalid_html_tags,
    rustdoc::invalid_rust_codeblocks,
    rustdoc::missing_crate_level_docs,
    rustdoc::private_intra_doc_links
)]
// In test code, it makes sense to weight more heavily towards concise, readable
// code over correct or debuggable code.
#![cfg_attr(any(test, kani), allow(
    // In tests, you get line numbers and have access to source code, so panic
    // messages are less important. You also often unwrap a lot, which would
    // make expect'ing instead very verbose.
    clippy::unwrap_used,
    // In tests, there's no harm to "panic risks" - the worst that can happen is
    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly
    // "in the field".
    clippy::arithmetic_side_effects,
    clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "simd-nightly", feature(stdsimd))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    feature(layout_for_ptr, strict_provenance)
)]

// This is a hack to allow zerocopy-derive derives to work in this crate. They
// assume that zerocopy is linked as an extern crate, so they access items from
// it as `zerocopy::Xxx`. This makes that still work.
#[cfg(any(feature = "derive", test))]
extern crate self as zerocopy;

#[macro_use]
mod macros;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub mod byteorder;
#[doc(hidden)]
pub mod macro_util;
mod util;
// TODO(#252): If we make this pub, come up with a better name.
mod wrappers;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub use crate::byteorder::*;
pub use crate::wrappers::*;

#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;

// `pub use` separately here so that we can mark it `#[doc(hidden)]`.
//
// TODO(#29): Remove this or add a doc comment.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[doc(hidden)]
pub use zerocopy_derive::KnownLayout;

use core::{
    cell::{self, RefMut},
    cmp::Ordering,
    fmt::{self, Debug, Display, Formatter},
    hash::Hasher,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::{
        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
    },
    ops::{Deref, DerefMut},
    ptr::{self, NonNull},
    slice,
};

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::{boxed::Box, vec::Vec};

#[cfg(any(feature = "alloc", kani))]
use core::alloc::Layout;

// Used by `TryFromBytes::is_bit_valid`.
#[doc(hidden)]
pub use crate::util::ptr::Ptr;

// For each polyfill, as soon as the corresponding feature is stable, the
// polyfill import will be unused because method/function resolution will prefer
// the inherent method/function over a trait method/function. Thus, we suppress
// the `unused_imports` warning.
//
// See the documentation on `util::polyfills` for more information.
#[allow(unused_imports)]
use crate::util::polyfills::NonNullExt as _;

#[rustversion::nightly]
#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
const _: () = {
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};

/// The target pointer width, counted in bits.
const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;

/// The layout of a type which might be dynamically-sized.
///
/// `DstLayout` describes the layout of sized types, slice types, and "slice
/// DSTs" - ie, those that are known by the type system to have a trailing slice
/// (as distinguished from `dyn Trait` types - such types *might* have a
/// trailing slice type, but the type system isn't aware of it).
///
/// # Safety
///
/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
/// Rust types - ie, those that satisfy the layout requirements outlined by
/// [the reference]. Callers may assume that an instance of `DstLayout`
/// satisfies any conditions imposed on Rust types by the reference.
///
/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
/// - `layout.align` is equal to `T`'s alignment
/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
///   `size_of::<T>() == size`
/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
///   - `T` is a slice DST
///   - The `size` of an instance of `T` with `elems` trailing slice elements
///     is equal to `slice_layout.offset + slice_layout.elem_size * elems`
///     rounded up to the nearest multiple of `layout.align`. Any bytes in the
///     range `[slice_layout.offset + slice_layout.elem_size * elems, size)`
///     are padding and must not be assumed to be initialized.
///
/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
#[doc(hidden)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
pub struct DstLayout {
    align: NonZeroUsize,
    size_info: SizeInfo,
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
enum SizeInfo<E = usize> {
    Sized { _size: usize },
    SliceDst(TrailingSliceLayout<E>),
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
struct TrailingSliceLayout<E = usize> {
    // The offset of the first byte of the trailing slice field. Note that this
    // is NOT the same as the minimum size of the type. For example, consider
    // the following type:
    //
    //   struct Foo {
    //       a: u16,
    //       b: u8,
    //       c: [u8],
    //   }
    //
    // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
    // by a padding byte.
    _offset: usize,
    // The size of the element type of the trailing slice field.
    _elem_size: E,
}

impl SizeInfo {
    /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
    /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
    #[allow(unused)]
    const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
        Some(match *self {
            SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
            SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
                } else {
                    return None;
                }
            }
        })
    }
}

#[doc(hidden)]
#[derive(Copy, Clone)]
#[cfg_attr(test, derive(Debug))]
#[allow(missing_debug_implementations)]
pub enum _CastType {
    _Prefix,
    _Suffix,
}

impl DstLayout {
    /// The minimum possible alignment of a type.
    const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
        Some(min_align) => min_align,
        None => unreachable!(),
    };

    /// The maximum theoretically possible alignment of a type.
    ///
    /// For compatibility with future Rust versions, this is defined as the
    /// maximum power-of-two that fits into a `usize`. See also
    /// [`DstLayout::CURRENT_MAX_ALIGN`].
    const THEORETICAL_MAX_ALIGN: NonZeroUsize =
        match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
            Some(max_align) => max_align,
            None => unreachable!(),
        };

    /// The current, documented max alignment of a type \[1\].
    ///
    /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
    ///
    ///   The alignment value must be a power of two from 1 up to
    ///   2<sup>29</sup>.
    #[cfg(not(kani))]
    const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
        Some(max_align) => max_align,
        None => unreachable!(),
    };

    /// Constructs a `DstLayout` for a zero-sized type with `repr_align`
    /// alignment (or 1). If `repr_align` is provided, then it must be a power
    /// of two.
    ///
    /// # Panics
    ///
    /// This function panics if the supplied `repr_align` is not a power of two.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that the contract of this function is satisfied.
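    ///
    /// # Example
    ///
    /// A sketch of the relationship to `repr(align)` (this API is internal, so
    /// the snippet is illustrative rather than a tested doc example):
    ///
    /// ```ignore
    /// // Describes the layout of `#[repr(C, align(4))] struct Zst;`:
    /// // size 0, alignment 4.
    /// let layout = DstLayout::new_zst(NonZeroUsize::new(4));
    /// ```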
    #[doc(hidden)]
    #[inline]
    pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
        let align = match repr_align {
            Some(align) => align,
            None => Self::MIN_ALIGN,
        };

        assert!(align.is_power_of_two());

        DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
    }

    /// Constructs a `DstLayout` which describes `T`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
    #[doc(hidden)]
    #[inline]
    pub const fn for_type<T>() -> DstLayout {
        // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
        // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
        // `size` field is also correct by construction.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
        }
    }

    /// Constructs a `DstLayout` which describes `[T]`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
    const fn for_slice<T>() -> DstLayout {
        // SAFETY: The alignment of a slice is equal to the alignment of its
        // element type, and so `align` is initialized correctly.
        //
        // Since this is just a slice type, there is no offset between the
        // beginning of the type and the beginning of the slice, so it is
        // correct to set `offset: 0`. The `elem_size` is correct by
        // construction. Since `[T]` is a (degenerate case of a) slice DST, it
        // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                _offset: 0,
                _elem_size: mem::size_of::<T>(),
            }),
        }
    }

    /// Like `Layout::extend`, this creates a layout that describes a record
    /// whose layout consists of `self` followed by `field`, including the
    /// necessary inter-field padding, but not any trailing padding.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked for each field in declaration order. To add trailing
    /// padding, call `DstLayout::pad_to_align` after extending the layout for
    /// all fields. If `self` corresponds to a type marked with
    /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
    /// otherwise `None`.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
    /// fields whose layout are `self`, and those fields are immediately
    /// followed by a field whose layout is `field`, then unsafe code may rely
    /// on `self.extend(field, repr_packed)` producing a layout that correctly
    /// encompasses those two components.
    ///
    /// We make no guarantees to the behavior of this method if these fragments
    /// cannot appear in a valid Rust type (e.g., the concatenation of the
    /// layouts would lead to a size larger than `isize::MAX`).
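    ///
    /// # Example
    ///
    /// A sketch of computing the layout of `#[repr(C)] struct Foo { a: u16, b: u8 }`
    /// field-by-field (this API is internal, so the snippet is illustrative
    /// rather than a tested doc example):
    ///
    /// ```ignore
    /// let foo = DstLayout::new_zst(None)              // size 0, align 1
    ///     .extend(DstLayout::for_type::<u16>(), None) // size 2, align 2
    ///     .extend(DstLayout::for_type::<u8>(), None)  // size 3, align 2
    ///     .pad_to_align();                            // size 4, align 2
    /// ```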
    #[doc(hidden)]
    #[inline]
    pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
        use util::{core_layout::padding_needed_for, max, min};

        // If `repr_packed` is `None`, there are no alignment constraints, and
        // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
        let max_align = match repr_packed {
            Some(max_align) => max_align,
            None => Self::THEORETICAL_MAX_ALIGN,
        };

        assert!(max_align.is_power_of_two());

        // We use Kani to prove that this method is robust to future increases
        // in Rust's maximum allowed alignment. However, if such a change ever
        // actually occurs, we'd like to be notified via assertion failures.
        #[cfg(not(kani))]
        {
            debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            if let Some(repr_packed) = repr_packed {
                debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            }
        }

        // The field's alignment is clamped by `repr_packed` (i.e., the
        // `repr(packed(N))` attribute, if any) [1].
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   The alignments of each field, for the purpose of positioning
        //   fields, is the smaller of the specified alignment and the
        //   alignment of the field's type.
        let field_align = min(field.align, max_align);

        // The struct's alignment is the maximum of its previous alignment and
        // `field_align`.
        let align = max(self.align, field_align);

        let size_info = match self.size_info {
            // If the layout is already a DST, we panic; DSTs cannot be extended
            // with additional fields.
            SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),

            SizeInfo::Sized { _size: preceding_size } => {
                // Compute the minimum amount of inter-field padding needed to
                // satisfy the field's alignment, and the offset of the trailing
                // field [1].
                //
                // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
                //
                //   Inter-field padding is guaranteed to be the minimum
                //   required in order to satisfy each field's (possibly
                //   altered) alignment.
                let padding = padding_needed_for(preceding_size, field_align);

                // This will not panic (and is proven to not panic, with Kani)
                // if the layout components can correspond to a leading layout
                // fragment of a valid Rust type, but may panic otherwise (e.g.,
                // combining or aligning the components would create a size
                // exceeding `isize::MAX`).
                let offset = match preceding_size.checked_add(padding) {
                    Some(offset) => offset,
                    None => panic!("Adding padding to `self`'s size overflows `usize`."),
                };

                match field.size_info {
                    SizeInfo::Sized { _size: field_size } => {
                        // If the trailing field is sized, the resulting layout
                        // will be sized. Its size will be the sum of the
                        // preceding layout, the size of the new field, and the
                        // size of inter-field padding between the two.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let size = match offset.checked_add(field_size) {
                            Some(size) => size,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::Sized { _size: size }
                    }
                    SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: trailing_offset,
                        _elem_size,
                    }) => {
                        // If the trailing field is dynamically sized, so too
                        // will be the resulting layout. The offset of the
                        // trailing slice component is the sum of the offset of
                        // the trailing field and the trailing slice offset
                        // within that field.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let offset = match offset.checked_add(trailing_offset) {
                            Some(offset) => offset,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
                    }
                }
            }
        };

        DstLayout { align, size_info }
    }

    /// Like `Layout::pad_to_align`, this routine rounds the size of this layout
    /// up to the nearest multiple of this type's alignment or `repr_packed`
    /// (whichever is less). This method leaves DST layouts unchanged, since the
    /// trailing padding of DSTs is computed at runtime.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked after the invocations of [`DstLayout::extend`]. If
    /// `self` corresponds to a type marked with `repr(packed(N))`, then
    /// `repr_packed` should be set to `Some(N)`, otherwise `None`.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` type begins with fields
    /// whose layout are `self` followed only by zero or more bytes of trailing
    /// padding (not included in `self`), then unsafe code may rely on
    /// `self.pad_to_align(repr_packed)` producing a layout that correctly
    /// encapsulates the layout of that type.
    ///
    /// We make no guarantees to the behavior of this method if `self` cannot
    /// appear in a valid Rust type (e.g., because the addition of trailing
    /// padding would lead to a size larger than `isize::MAX`).
    #[doc(hidden)]
    #[inline]
    pub const fn pad_to_align(self) -> Self {
        use util::core_layout::padding_needed_for;

        let size_info = match self.size_info {
            // For sized layouts, we add the minimum amount of trailing padding
            // needed to satisfy alignment.
            SizeInfo::Sized { _size: unpadded_size } => {
                let padding = padding_needed_for(unpadded_size, self.align);
                let size = match unpadded_size.checked_add(padding) {
                    Some(size) => size,
                    None => panic!("Adding padding caused size to overflow `usize`."),
                };
                SizeInfo::Sized { _size: size }
            }
            // For DST layouts, trailing padding depends on the length of the
            // trailing DST and is computed at runtime. This does not alter the
            // offset or element size of the layout, so we leave `size_info`
            // unchanged.
            size_info @ SizeInfo::SliceDst(_) => size_info,
        };

        DstLayout { align: self.align, size_info }
    }

    /// Validates that a cast is sound from a layout perspective.
    ///
    /// Validates that the size and alignment requirements of a type with the
    /// layout described in `self` would not be violated by performing a
    /// `cast_type` cast from a pointer with address `addr` which refers to a
    /// memory region of size `bytes_len`.
    ///
    /// If the cast is valid, `validate_cast_and_convert_metadata` returns
    /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
    /// `elems` is the maximum number of trailing slice elements for which a
    /// cast would be valid (for sized types, `elems` is meaningless and should
    /// be ignored). `split_at` is the index at which to split the memory region
    /// in order for the prefix (suffix) to contain the result of the cast, and
    /// in order for the remaining suffix (prefix) to contain the leftover
    /// bytes.
    ///
    /// There are three conditions under which a cast can fail:
    /// - The smallest possible value for the type is larger than the provided
    ///   memory region
    /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
    ///   alignment requirement
    /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
    ///   `self`'s alignment requirement (as a consequence, since all instances
    ///   of the type are a multiple of its alignment, no size for the type will
    ///   result in a starting address which is properly aligned)
    ///
    /// # Safety
    ///
    /// The caller may assume that this implementation is correct, and may rely
    /// on that assumption for the soundness of their code. In particular, the
    /// caller may assume that, if `validate_cast_and_convert_metadata` returns
    /// `Some((elems, split_at))`, then:
    /// - A pointer to the type (for dynamically sized types, this includes
    ///   `elems` as its pointer metadata) describes an object of size `size <=
    ///   bytes_len`
    /// - If this is a prefix cast:
    ///   - `addr` satisfies `self`'s alignment
    ///   - `size == split_at`
    /// - If this is a suffix cast:
    ///   - `split_at == bytes_len - size`
    ///   - `addr + split_at` satisfies `self`'s alignment
    ///
    /// Note that this method does *not* ensure that a pointer constructed from
    /// its return values will be a valid pointer. In particular, this method
    /// does not reason about `isize` overflow, which is a requirement of many
    /// Rust pointer APIs, and may at some point be determined to be a validity
    /// invariant of pointer types themselves. This should never be a problem so
    /// long as the arguments to this method are derived from a known-valid
    /// pointer (e.g., one derived from a safe Rust reference), but it is
    /// nonetheless the caller's responsibility to justify that pointer
    /// arithmetic will not overflow based on a safety argument *other than* the
    /// mere fact that this method returned successfully.
    ///
    /// # Panics
    ///
    /// `validate_cast_and_convert_metadata` will panic if `self` describes a
    /// DST whose trailing slice element is zero-sized.
    ///
    /// If `addr + bytes_len` overflows `usize`,
    /// `validate_cast_and_convert_metadata` may panic, or it may return
    /// incorrect results. No guarantees are made about when
    /// `validate_cast_and_convert_metadata` will panic. The caller should not
    /// rely on `validate_cast_and_convert_metadata` panicking in any particular
    /// condition, even if `debug_assertions` are enabled.
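    ///
    /// # Example
    ///
    /// A sketch of a prefix cast of `[u16]` (trailing slice offset 0, element
    /// size 2, alignment 2) into a 5-byte region starting at an even address
    /// (this API is internal, so the snippet is illustrative rather than a
    /// tested doc example):
    ///
    /// ```ignore
    /// let layout = DstLayout::for_slice::<u16>();
    /// // 4 of the 5 bytes hold 2 elements; the last byte is leftover, so the
    /// // region should be split at index 4.
    /// let (elems, split_at) = layout
    ///     .validate_cast_and_convert_metadata(0, 5, _CastType::_Prefix)
    ///     .unwrap();
    /// assert_eq!((elems, split_at), (2, 4));
    /// ```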
    #[allow(unused)]
    const fn validate_cast_and_convert_metadata(
        &self,
        addr: usize,
        bytes_len: usize,
        cast_type: _CastType,
    ) -> Option<(usize, usize)> {
        // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
        macro_rules! __debug_assert {
            ($e:expr $(, $msg:expr)?) => {
                debug_assert!({
                    #[allow(clippy::arithmetic_side_effects)]
                    let e = $e;
                    e
                } $(, $msg)?);
            };
        }

        // Note that, in practice, `self` is always a compile-time constant. We
        // do this check earlier than needed to ensure that we always panic as a
        // result of bugs in the program (such as calling this function on an
        // invalid type) instead of allowing this panic to be hidden if the cast
        // would have failed anyway for runtime reasons (such as a too-small
        // memory region).
        //
        // TODO(#67): Once our MSRV is 1.65, use let-else:
        // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
        let size_info = match self.size_info.try_to_nonzero_elem_size() {
            Some(size_info) => size_info,
            None => panic!("attempted to cast to slice type with zero-sized element"),
        };

        // Precondition
        __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");

        // Alignment checks go in their own block to avoid introducing variables
        // into the top-level scope.
        {
            // We check alignment for `addr` (for prefix casts) or `addr +
            // bytes_len` (for suffix casts). For a prefix cast, the correctness
            // of this check is trivial - `addr` is the address the object will
            // live at.
            //
            // For a suffix cast, we know that all valid sizes for the type are
            // a multiple of the alignment (and by safety precondition, we know
            // `DstLayout` may only describe valid Rust types). Thus, a
            // validly-sized instance which lives at a validly-aligned address
            // must also end at a validly-aligned address. Thus, if the end
            // address for a suffix cast (`addr + bytes_len`) is not aligned,
            // then no valid start address will be aligned either.
            let offset = match cast_type {
                _CastType::_Prefix => 0,
                _CastType::_Suffix => bytes_len,
            };

            // Addition is guaranteed not to overflow because `offset <=
            // bytes_len`, and `addr + bytes_len <= usize::MAX` is a
            // precondition of this method. Modulus is guaranteed not to divide
            // by 0 because `align` is non-zero.
            #[allow(clippy::arithmetic_side_effects)]
            if (addr + offset) % self.align.get() != 0 {
                return None;
            }
        }

        let (elems, self_bytes) = match size_info {
            SizeInfo::Sized { _size: size } => {
                if size > bytes_len {
                    return None;
                }
                (0, size)
            }
            SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
                // Calculate the maximum number of bytes that could be consumed
                // - any number of bytes larger than this will either not be a
                // multiple of the alignment, or will be larger than
                // `bytes_len`.
                let max_total_bytes =
                    util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
                // Calculate the maximum number of bytes that could be consumed
                // by the trailing slice.
                //
                // TODO(#67): Once our MSRV is 1.65, use let-else:
                // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
                let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
                    Some(max) => max,
                    // `bytes_len` too small even for 0 trailing slice elements.
                    None => return None,
                };

                // Calculate the number of elements that fit in
                // `max_slice_and_padding_bytes`; any remaining bytes will be
                // considered padding.
                //
                // Guaranteed not to divide by zero: `elem_size` is non-zero.
                #[allow(clippy::arithmetic_side_effects)]
                let elems = max_slice_and_padding_bytes / elem_size.get();
                // Guaranteed not to overflow on multiplication: `usize::MAX >=
                // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes /
                // elem_size) * elem_size`.
                //
                // Guaranteed not to overflow on addition:
                // - max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size + offset <= max_total_bytes <= usize::MAX
                #[allow(clippy::arithmetic_side_effects)]
                let without_padding = offset + elems * elem_size.get();
                // `self_bytes` is equal to the offset bytes plus the bytes
                // consumed by the trailing slice plus any padding bytes
                // required to satisfy the alignment. Note that we have computed
                // the maximum number of trailing slice elements that could fit
                // in `self_bytes`, so any padding is guaranteed to be less than
                // the size of an extra element.
                //
                // Guaranteed not to overflow:
                // - By previous comment: without_padding == elems * elem_size +
                //   offset <= max_total_bytes
                // - By construction, `max_total_bytes` is a multiple of
                //   `self.align`.
                // - At most, adding padding needed to round `without_padding`
                //   up to the next multiple of the alignment will bring
                //   `self_bytes` up to `max_total_bytes`.
                #[allow(clippy::arithmetic_side_effects)]
                let self_bytes = without_padding
                    + util::core_layout::padding_needed_for(without_padding, self.align);
                (elems, self_bytes)
            }
        };

        __debug_assert!(self_bytes <= bytes_len);

        let split_at = match cast_type {
            _CastType::_Prefix => self_bytes,
            // Guaranteed not to underflow:
            // - In the `Sized` branch, only returns `size` if `size <=
            //   bytes_len`.
            // - In the `SliceDst` branch, calculates `self_bytes <=
            //   max_total_bytes`, which is upper-bounded by `bytes_len`.
            #[allow(clippy::arithmetic_side_effects)]
            _CastType::_Suffix => bytes_len - self_bytes,
        };

        Some((elems, split_at))
    }
}

/// A trait which carries information about a type's layout that is used by the
/// internals of this crate.
///
/// This trait is not meant for consumption by code outside of this crate. While
/// the normal semver stability guarantees apply with respect to which types
/// implement this trait and which trait implementations are implied by this
/// trait, no semver stability guarantees are made regarding its internals; they
/// may change at any time, and code which makes use of them may break.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
pub unsafe trait KnownLayout {
    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
    // it likely won't be in the future, but there's no reason not to be
    // forwards-compatible with object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    #[doc(hidden)]
    const LAYOUT: DstLayout;

    /// SAFETY: The returned pointer has the same address and provenance as
    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
    /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
}

// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T: KnownLayout> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // TODO(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }
}

#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => MaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);

safety_comment! {
    /// SAFETY:
    /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
    /// `[u8]` and `[T]` respectively. `str` has different bit validity than
    /// `[u8]`, but that doesn't affect the soundness of this impl.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
    unsafe_impl_known_layout!(#[repr([u8])] str);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
}

/// Analyzes whether a type is [`FromZeroes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #   Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// union MyUnion {
/// #   variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeroes#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeroes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeroes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeroes` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeroes`.
/// - If the type is an enum, it must be C-like (meaning that all variants have
///   no fields) and it must have a variant with a discriminant of `0`. See [the
///   reference] for a description of how discriminant values are chosen.
/// - The type must not contain any [`UnsafeCell`]s (this is required in order
///   for it to be sound to construct a `&[u8]` and a `&T` to the same region of
///   memory). The type may contain references or pointers to `UnsafeCell`s so
///   long as those values can themselves be initialized from zeroes
///   (`FromZeroes` is not currently implemented for, e.g.,
///   `Option<&UnsafeCell<_>>`, but it could be one day).
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
/// [`UnsafeCell`]: core::cell::UnsafeCell
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeroes` if:
/// 1. its padding is soundly `FromZeroes`, and
/// 2. its fields are soundly `FromZeroes`.
///
/// The answer to the first question is always yes: padding bytes do not have
/// any validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
/// its fields are `FromZeroes`.
// TODO(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeroes;

/// Types whose validity can be checked at runtime, allowing them to be
/// conditionally converted from byte slices.
///
/// WARNING: Do not implement this trait yourself! Instead, use
/// `#[derive(TryFromBytes)]`.
///
/// `TryFromBytes` types can safely be deserialized from an untrusted sequence
/// of bytes by performing a runtime check that the byte sequence contains a
/// valid instance of `Self`.
///
/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see
/// the [`byteorder`] module.
///
/// # What is a "valid instance"?
///
/// In Rust, each type has *bit validity*, which refers to the set of bit
/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (ie, values
/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
/// invalid value, this is considered [undefined behavior].
///
/// Rust's bit validity rules are currently being decided, which means that some
/// types have three classes of bit patterns: those which are definitely valid,
/// and whose validity is documented in the language; those which may or may not
/// be considered valid at some point in the future; and those which are
/// definitely invalid.
///
/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
/// language.
///
/// For most use cases, Rust's current guarantees align with programmers'
/// intuitions about what ought to be valid. As a result, zerocopy's
/// conservatism should not affect most users. One notable exception is unions,
/// whose bit validity is very up in the air; zerocopy does not permit
/// implementing `TryFromBytes` for any union type.
///
/// If you are negatively affected by lack of support for a particular type,
/// we encourage you to let us know by [filing an issue][github-repo].
///
/// # Safety
///
/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
/// or representation of `T`. It merely provides the ability to perform a
/// validity check at runtime via methods like [`try_from_ref`].
///
/// Currently, it is not possible to stably implement `TryFromBytes` other than
/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
/// on this trait that provide well-defined safety invariants, no stability
/// guarantees are made with respect to these items. In particular, future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
/// [github-repo]: https://github.com/google/zerocopy
/// [`try_from_ref`]: TryFromBytes::try_from_ref
// TODO(#5): Update `try_from_ref` doc link once it exists
#[doc(hidden)]
pub unsafe trait TryFromBytes {
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// ## Preconditions
    ///
    /// The memory referenced by `candidate` may only be accessed via reads for
    /// the duration of this method call. This prohibits writes through mutable
    /// references and through [`UnsafeCell`]s. There may exist immutable
    /// references to the same memory which contain `UnsafeCell`s so long as:
    /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
    ///   `Self`. This is a bidirectional property: `Self` may not contain
    ///   `UnsafeCell`s where other references to the same memory do not, and
    ///   vice-versa.
    /// - Those `UnsafeCell`s are never used to perform mutation for the
    ///   duration of this method call.
    ///
    /// The memory referenced by `candidate` may not be referenced by any
    /// mutable references even if these references are not used to perform
    /// mutation.
    ///
    /// `candidate` is not required to refer to a valid `Self`. However, it must
    /// satisfy the requirement that uninitialized bytes may only be present
    /// where it is possible for them to be present in `Self`. This is a dynamic
    /// property: if, at a particular byte offset, a valid enum discriminant is
    /// set, the subsequent bytes may only have uninitialized bytes as
    /// specified by the corresponding enum.
    ///
    /// Formally, given `len = size_of_val_raw(candidate)`, at every byte
    /// offset, `b`, in the range `[0, len)`:
    /// - If, in all instances `s: Self` of length `len`, the byte at offset `b`
    ///   in `s` is initialized, then the byte at offset `b` within `*candidate`
    ///   must be initialized.
    /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`.
    ///   Let `S` be the subset of valid instances of `Self` of length `len`
    ///   which contain `c` in the offset range `[0, b)`. If, for all instances
    ///   of `s: Self` in `S`, the byte at offset `b` in `s` is initialized,
    ///   then the byte at offset `b` in `*candidate` must be initialized.
    ///
    /// Pragmatically, this means that if `*candidate` is guaranteed to
    /// contain an enum type at a particular offset, and the enum discriminant
    /// stored in `*candidate` corresponds to a valid variant of that enum
    /// type, then it is guaranteed that the appropriate bytes of `*candidate`
    /// are initialized as defined by that variant's bit validity (although
    /// note that the variant may contain another enum type, in which case the
    /// same rules apply depending on the state of its discriminant, and so on
    /// recursively).
    ///
    /// ## Postconditions
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    #[doc(hidden)]
    unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool;

    /// Attempts to interpret a byte slice as a `Self`.
    ///
    /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that
    /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is
    /// reinterpreted as a `Self`.
    ///
    /// Note that Rust's bit validity rules are still being decided. As such,
    /// there exist types whose bit validity is ambiguous. See the
    /// `TryFromBytes` docs for a discussion of how these cases are handled.
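    ///
    /// # Example
    ///
    /// A sketch, assuming a hypothetical type `MyBool` for which
    /// `TryFromBytes` has been derived and whose only valid bit patterns are
    /// `0x00` and `0x01` (mirroring `bool`'s bit validity; this trait and
    /// method are still unstable and hidden):
    ///
    /// ```ignore
    /// // A one-byte candidate containing `0x01` is a valid instance...
    /// assert!(<MyBool as TryFromBytes>::try_from_ref(&[0x01][..]).is_some());
    /// // ...while `0x02` is rejected by the runtime validity check.
    /// assert!(<MyBool as TryFromBytes>::try_from_ref(&[0x02][..]).is_none());
    /// ```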
    // TODO(#251): In a future in which we distinguish between `FromBytes` and
    // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow
    // interior mutability.
    #[inline]
    #[doc(hidden)] // TODO(#5): Finalize name before removing this attribute.
    fn try_from_ref(bytes: &[u8]) -> Option<&Self>
    where
        Self: KnownLayout,
    {
        let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;

        // SAFETY:
        // - Since `bytes` is an immutable reference, we know that no mutable
        //   references exist to this memory region.
        // - Since `[u8]` contains no `UnsafeCell`s, we know there are no
        //   `&UnsafeCell` references to this memory region.
        // - Since we don't permit implementing `TryFromBytes` for types which
        //   contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so
        //   the requirement that all references contain `UnsafeCell`s at the
        //   same offsets is trivially satisfied.
        // - All bytes of `bytes` are initialized.
        //
        // This call may panic. If that happens, it doesn't cause any soundness
        // issues, as we have not generated any invalid state which we need to
        // fix before returning.
        if unsafe { !Self::is_bit_valid(maybe_self) } {
            return None;
        }

        // SAFETY:
        // - Preconditions for `as_ref`:
        //   - `is_bit_valid` guarantees that `*maybe_self` contains a valid
        //     `Self`. Since `&[u8]` does not permit interior mutation, this
        //     cannot be invalidated after this method returns.
        //   - Since the argument and return types are immutable references,
        //     Rust will prevent the caller from producing any mutable
        //     references to the same memory region.
        //   - Since `Self` is not allowed to contain any `UnsafeCell`s and the
        //     same is true of `[u8]`, interior mutation is not possible. Thus,
        //     no mutation is possible. For the same reason, there is no
        //     mismatch between the two types in terms of which byte ranges are
        //     referenced as `UnsafeCell`s.
        // - Since interior mutation isn't possible within `Self`, there's no
        //   way for the returned reference to be used to modify the byte range,
        //   and thus there's no way for the returned reference to be used to
        //   write an invalid `[u8]` which would be observable via the original
        //   `&[u8]`.
        Some(unsafe { maybe_self.as_ref() })
    }
}
1276
1277/// Types for which a sequence of bytes all set to zero represents a valid
1278/// instance of the type.
1279///
1280/// Any memory region of the appropriate length which is guaranteed to contain
1281/// only zero bytes can be viewed as any `FromZeroes` type with no runtime
1282/// overhead. This is useful whenever memory is known to be in a zeroed state,
1283/// such memory returned from some allocation routines.
1284///
1285/// # Implementation
1286///
1287/// **Do not implement this trait yourself!** Instead, use
1288/// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature);
1289/// e.g.:
1290///
1291/// ```
1292/// # use zerocopy_derive::FromZeroes;
1293/// #[derive(FromZeroes)]
1294/// struct MyStruct {
1295/// # /*
1296/// ...
1297/// # */
1298/// }
1299///
1300/// #[derive(FromZeroes)]
1301/// #[repr(u8)]
1302/// enum MyEnum {
1303/// # Variant0,
1304/// # /*
1305/// ...
1306/// # */
1307/// }
1308///
1309/// #[derive(FromZeroes)]
1310/// union MyUnion {
1311/// # variant: u8,
1312/// # /*
1313/// ...
1314/// # */
1315/// }
1316/// ```
1317///
1318/// This derive performs a sophisticated, compile-time safety analysis to
1319/// determine whether a type is `FromZeroes`.
1320///
1321/// # Safety
1322///
1323/// *This section describes what is required in order for `T: FromZeroes`, and
1324/// what unsafe code may assume of such types. If you don't plan on implementing
1325/// `FromZeroes` manually, and you don't plan on writing unsafe code that
1326/// operates on `FromZeroes` types, then you don't need to read this section.*
1327///
1328/// If `T: FromZeroes`, then unsafe code may assume that:
1329/// - It is sound to treat any initialized sequence of zero bytes of length
1330/// `size_of::<T>()` as a `T`.
1331/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
1332/// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
1333/// construct a `t: &T` at the same address as `b`, and it is sound for both
1334/// `b` and `t` to be live at the same time.
1335///
1336/// If a type is marked as `FromZeroes` which violates this contract, it may
1337/// cause undefined behavior.
1338///
1339/// `#[derive(FromZeroes)]` only permits [types which satisfy these
1340/// requirements][derive-analysis].
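///
/// As an illustration of the first guarantee above, here is a hedged sketch
/// of how unsafe code might rely on `T: FromZeroes`; this mirrors how
/// [`FromZeroes::new_zeroed`] is implemented:
///
/// ```
/// # use zerocopy::FromZeroes;
/// fn make_zeroed<T: FromZeroes>() -> T {
///     // SAFETY: `T: FromZeroes` guarantees that the all-zeroes bit pattern
///     // is a valid `T`.
///     unsafe { core::mem::zeroed() }
/// }
///
/// let x: u32 = make_zeroed();
/// assert_eq!(x, 0);
/// ```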
1341///
1342#[cfg_attr(
1343 feature = "derive",
1344 doc = "[derive]: zerocopy_derive::FromZeroes",
1345 doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
1346)]
1347#[cfg_attr(
1348 not(feature = "derive"),
1349 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"),
1350 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"),
1351)]
1352pub unsafe trait FromZeroes {
1353 // The `Self: Sized` bound makes it so that `FromZeroes` is still object
1354 // safe.
1355 #[doc(hidden)]
1356 fn only_derive_is_allowed_to_implement_this_trait()
1357 where
1358 Self: Sized;
1359
1360 /// Overwrites `self` with zeroes.
1361 ///
1362 /// Sets every byte in `self` to 0. While this is similar to doing `*self =
1363 /// Self::new_zeroed()`, it differs in that `zero` does not semantically
1364 /// drop the current value and replace it with a new one - it simply
1365 /// modifies the bytes of the existing value.
1366 ///
1367 /// # Examples
1368 ///
1369 /// ```
1370 /// # use zerocopy::FromZeroes;
1371 /// # use zerocopy_derive::*;
1372 /// #
1373 /// #[derive(FromZeroes)]
1374 /// #[repr(C)]
1375 /// struct PacketHeader {
1376 /// src_port: [u8; 2],
1377 /// dst_port: [u8; 2],
1378 /// length: [u8; 2],
1379 /// checksum: [u8; 2],
1380 /// }
1381 ///
1382 /// let mut header = PacketHeader {
1383 /// src_port: 100u16.to_be_bytes(),
1384 /// dst_port: 200u16.to_be_bytes(),
1385 /// length: 300u16.to_be_bytes(),
1386 /// checksum: 400u16.to_be_bytes(),
1387 /// };
1388 ///
1389 /// header.zero();
1390 ///
1391 /// assert_eq!(header.src_port, [0, 0]);
1392 /// assert_eq!(header.dst_port, [0, 0]);
1393 /// assert_eq!(header.length, [0, 0]);
1394 /// assert_eq!(header.checksum, [0, 0]);
1395 /// ```
1396 #[inline(always)]
1397 fn zero(&mut self) {
1398 let slf: *mut Self = self;
1399 let len = mem::size_of_val(self);
1400 // SAFETY:
1401 // - `self` is guaranteed by the type system to be valid for writes of
1402 // size `size_of_val(self)`.
1403 // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
1404 // as required by `u8`.
1405 // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
        // instance of `Self`.
1407 //
1408 // TODO(#429): Add references to docs and quotes.
1409 unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
1410 }
1411
1412 /// Creates an instance of `Self` from zeroed bytes.
1413 ///
1414 /// # Examples
1415 ///
1416 /// ```
1417 /// # use zerocopy::FromZeroes;
1418 /// # use zerocopy_derive::*;
1419 /// #
1420 /// #[derive(FromZeroes)]
1421 /// #[repr(C)]
1422 /// struct PacketHeader {
1423 /// src_port: [u8; 2],
1424 /// dst_port: [u8; 2],
1425 /// length: [u8; 2],
1426 /// checksum: [u8; 2],
1427 /// }
1428 ///
1429 /// let header: PacketHeader = FromZeroes::new_zeroed();
1430 ///
1431 /// assert_eq!(header.src_port, [0, 0]);
1432 /// assert_eq!(header.dst_port, [0, 0]);
1433 /// assert_eq!(header.length, [0, 0]);
1434 /// assert_eq!(header.checksum, [0, 0]);
1435 /// ```
1436 #[inline(always)]
1437 fn new_zeroed() -> Self
1438 where
1439 Self: Sized,
1440 {
1441 // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
1442 unsafe { mem::zeroed() }
1443 }
1444
1445 /// Creates a `Box<Self>` from zeroed bytes.
1446 ///
1447 /// This function is useful for allocating large values on the heap and
1448 /// zero-initializing them, without ever creating a temporary instance of
1449 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
1450 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
1451 /// storing `[u8; 1048576]` in a temporary variable on the stack.
1452 ///
1453 /// On systems that use a heap implementation that supports allocating from
1454 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
1455 /// have performance benefits.
1456 ///
    /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
    /// container types, though doing so may move the data into a new
    /// allocation.
1459 ///
1460 /// # Panics
1461 ///
1462 /// Panics if allocation of `size_of::<Self>()` bytes fails.
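    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// let buf: Box<[u8; 1024]> = <[u8; 1024]>::new_box_zeroed();
    /// assert!(buf.iter().all(|&b| b == 0));
    /// ```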
1463 #[cfg(feature = "alloc")]
1464 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1465 #[inline]
1466 fn new_box_zeroed() -> Box<Self>
1467 where
1468 Self: Sized,
1469 {
1470 // If `T` is a ZST, then return a proper boxed instance of it. There is
1471 // no allocation, but `Box` does require a correct dangling pointer.
1472 let layout = Layout::new::<Self>();
1473 if layout.size() == 0 {
1474 return Box::new(Self::new_zeroed());
1475 }
1476
1477 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1478 #[allow(clippy::undocumented_unsafe_blocks)]
1479 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1480 if ptr.is_null() {
1481 alloc::alloc::handle_alloc_error(layout);
1482 }
1483 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1484 #[allow(clippy::undocumented_unsafe_blocks)]
1485 unsafe {
1486 Box::from_raw(ptr)
1487 }
1488 }
1489
1490 /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
1491 ///
1492 /// This function is useful for allocating large values of `[Self]` on the
1493 /// heap and zero-initializing them, without ever creating a temporary
1494 /// instance of `[Self; _]` on the stack. For example,
1495 /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
1496 /// the heap; it does not require storing the slice on the stack.
1497 ///
1498 /// On systems that use a heap implementation that supports allocating from
1499 /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
1500 /// benefits.
1501 ///
1502 /// If `Self` is a zero-sized type, then this function will return a
1503 /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
1504 /// actual information, but its `len()` property will report the correct
1505 /// value.
1506 ///
1507 /// # Panics
1508 ///
1509 /// * Panics if `size_of::<Self>() * len` overflows.
1510 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
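    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// let slice: Box<[u16]> = u16::new_box_slice_zeroed(4);
    /// assert_eq!(&*slice, [0u16; 4]);
    /// ```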
1511 #[cfg(feature = "alloc")]
1512 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1513 #[inline]
1514 fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
1515 where
1516 Self: Sized,
1517 {
1518 let size = mem::size_of::<Self>()
1519 .checked_mul(len)
1520 .expect("mem::size_of::<Self>() * len overflows `usize`");
1521 let align = mem::align_of::<Self>();
1522 // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
1523 // bug in which sufficiently-large allocations (those which, when
1524 // rounded up to the alignment, overflow `isize`) are not rejected,
1525 // which can cause undefined behavior. See #64 for details.
1526 //
1527 // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
1528 #[allow(clippy::as_conversions)]
1529 let max_alloc = (isize::MAX as usize).saturating_sub(align);
1530 assert!(size <= max_alloc);
1531 // TODO(https://github.com/rust-lang/rust/issues/55724): Use
1532 // `Layout::repeat` once it's stabilized.
1533 let layout =
1534 Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
1535
1536 let ptr = if layout.size() != 0 {
1537 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1538 #[allow(clippy::undocumented_unsafe_blocks)]
1539 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1540 if ptr.is_null() {
1541 alloc::alloc::handle_alloc_error(layout);
1542 }
1543 ptr
1544 } else {
1545 // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
1546 // is zero, but it does require a non-null dangling pointer for its
1547 // allocation.
1548 NonNull::<Self>::dangling().as_ptr()
1549 };
1550
1551 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1552 #[allow(clippy::undocumented_unsafe_blocks)]
1553 unsafe {
1554 Box::from_raw(slice::from_raw_parts_mut(ptr, len))
1555 }
1556 }
1557
1558 /// Creates a `Vec<Self>` from zeroed bytes.
1559 ///
    /// This function is useful for allocating large `Vec`s on the heap and
1561 /// zero-initializing them, without ever creating a temporary instance of
1562 /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
1563 /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
1564 /// heap; it does not require storing intermediate values on the stack.
1565 ///
1566 /// On systems that use a heap implementation that supports allocating from
1567 /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
1568 ///
1569 /// If `Self` is a zero-sized type, then this function will return a
1570 /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
1571 /// actual information, but its `len()` property will report the correct
1572 /// value.
1573 ///
1574 /// # Panics
1575 ///
1576 /// * Panics if `size_of::<Self>() * len` overflows.
1577 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
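    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `alloc` feature is enabled:
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// let v: Vec<u8> = u8::new_vec_zeroed(8);
    /// assert_eq!(v.as_slice(), [0u8; 8]);
    /// ```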
1578 #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1580 #[inline(always)]
1581 fn new_vec_zeroed(len: usize) -> Vec<Self>
1582 where
1583 Self: Sized,
1584 {
1585 Self::new_box_slice_zeroed(len).into()
1586 }
1587}
1588
1589/// Analyzes whether a type is [`FromBytes`].
1590///
1591/// This derive analyzes, at compile time, whether the annotated type satisfies
1592/// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is
1593/// sound to do so. This derive can be applied to structs, enums, and unions;
1594/// e.g.:
1595///
1596/// ```
1597/// # use zerocopy_derive::{FromBytes, FromZeroes};
1598/// #[derive(FromZeroes, FromBytes)]
1599/// struct MyStruct {
1600/// # /*
1601/// ...
1602/// # */
1603/// }
1604///
1605/// #[derive(FromZeroes, FromBytes)]
1606/// #[repr(u8)]
1607/// enum MyEnum {
1608/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1609/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1610/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1611/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1612/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1613/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1614/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1615/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1616/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1617/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1618/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1619/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1620/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1621/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1622/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1623/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1624/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1625/// # VFF,
1626/// # /*
1627/// ...
1628/// # */
1629/// }
1630///
1631/// #[derive(FromZeroes, FromBytes)]
1632/// union MyUnion {
1633/// # variant: u8,
1634/// # /*
1635/// ...
1636/// # */
1637/// }
1638/// ```
1639///
1640/// [safety conditions]: trait@FromBytes#safety
1641///
1642/// # Analysis
1643///
1644/// *This section describes, roughly, the analysis performed by this derive to
1645/// determine whether it is sound to implement `FromBytes` for a given type.
1646/// Unless you are modifying the implementation of this derive, or attempting to
1647/// manually implement `FromBytes` for a type yourself, you don't need to read
1648/// this section.*
1649///
1650/// If a type has the following properties, then this derive can implement
1651/// `FromBytes` for that type:
1652///
1653/// - If the type is a struct, all of its fields must be `FromBytes`.
1654/// - If the type is an enum:
1655/// - It must be a C-like enum (meaning that all variants have no fields).
1656/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1657/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1658/// - The maximum number of discriminants must be used (so that every possible
1659/// bit pattern is a valid one). Be very careful when using the `C`,
1660/// `usize`, or `isize` representations, as their size is
1661/// platform-dependent.
1662/// - The type must not contain any [`UnsafeCell`]s (this is required in order
1663/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
1664/// memory). The type may contain references or pointers to `UnsafeCell`s so
/// long as those values can themselves be initialized from an arbitrary byte
/// sequence (`FromBytes` is not currently implemented for, e.g., `Option<*const
/// UnsafeCell<_>>`, but it could be one day).
1668///
1669/// [`UnsafeCell`]: core::cell::UnsafeCell
1670///
1671/// This analysis is subject to change. Unsafe code may *only* rely on the
1672/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
1673/// implementation details of this derive.
1674///
1675/// ## Why isn't an explicit representation required for structs?
1676///
1677/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
1678/// that structs are marked with `#[repr(C)]`.
1679///
/// Per the [Rust reference][reference],
1681///
1682/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
1684///
1685/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1686///
1687/// Since the layout of structs only consists of padding bytes and field bytes,
1688/// a struct is soundly `FromBytes` if:
1689/// 1. its padding is soundly `FromBytes`, and
1690/// 2. its fields are soundly `FromBytes`.
1691///
/// The first condition always holds: padding bytes do not have any validity
/// constraints. A [discussion] of this question in the Unsafe Code Guidelines
/// Working Group concluded that it would be virtually unimaginable for future
/// versions of rustc to add validity constraints to padding bytes.
1696///
1697/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1698///
1699/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
1700/// its fields are `FromBytes`.
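///
/// For example, the following struct derives `FromBytes` despite having no
/// `repr` attribute at all (`Point` is a hypothetical type used purely for
/// illustration):
///
/// ```
/// # use zerocopy_derive::{FromBytes, FromZeroes};
/// #[derive(FromZeroes, FromBytes)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
/// ```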
1701// TODO(#146): Document why we don't require an enum to have an explicit `repr`
1702// attribute.
1703#[cfg(any(feature = "derive", test))]
1704#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1705pub use zerocopy_derive::FromBytes;
1706
1707/// Types for which any bit pattern is valid.
1708///
1709/// Any memory region of the appropriate length which contains initialized bytes
1710/// can be viewed as any `FromBytes` type with no runtime overhead. This is
1711/// useful for efficiently parsing bytes as structured data.
1712///
1713/// # Implementation
1714///
1715/// **Do not implement this trait yourself!** Instead, use
1716/// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature);
1717/// e.g.:
1718///
1719/// ```
1720/// # use zerocopy_derive::{FromBytes, FromZeroes};
1721/// #[derive(FromZeroes, FromBytes)]
1722/// struct MyStruct {
1723/// # /*
1724/// ...
1725/// # */
1726/// }
1727///
1728/// #[derive(FromZeroes, FromBytes)]
1729/// #[repr(u8)]
1730/// enum MyEnum {
1731/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1732/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1733/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1734/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1735/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1736/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1737/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1738/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1739/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1740/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1741/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1742/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1743/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1744/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1745/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1746/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1747/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1748/// # VFF,
1749/// # /*
1750/// ...
1751/// # */
1752/// }
1753///
1754/// #[derive(FromZeroes, FromBytes)]
1755/// union MyUnion {
1756/// # variant: u8,
1757/// # /*
1758/// ...
1759/// # */
1760/// }
1761/// ```
1762///
1763/// This derive performs a sophisticated, compile-time safety analysis to
1764/// determine whether a type is `FromBytes`.
1765///
1766/// # Safety
1767///
1768/// *This section describes what is required in order for `T: FromBytes`, and
1769/// what unsafe code may assume of such types. If you don't plan on implementing
1770/// `FromBytes` manually, and you don't plan on writing unsafe code that
1771/// operates on `FromBytes` types, then you don't need to read this section.*
1772///
1773/// If `T: FromBytes`, then unsafe code may assume that:
1774/// - It is sound to treat any initialized sequence of bytes of length
1775/// `size_of::<T>()` as a `T`.
/// - Given `b: &[u8]` where `b.len() == size_of::<T>()` and `b` is aligned to
///   `align_of::<T>()`, it is sound to construct a `t: &T` at the same address
1778/// as `b`, and it is sound for both `b` and `t` to be live at the same time.
1779///
1780/// If a type is marked as `FromBytes` which violates this contract, it may
1781/// cause undefined behavior.
1782///
1783/// `#[derive(FromBytes)]` only permits [types which satisfy these
1784/// requirements][derive-analysis].
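///
/// As an illustration of the first guarantee above, here is a hedged sketch
/// of how unsafe code might rely on `T: FromBytes`; this mirrors how
/// [`FromBytes::read_from`] behaves, though the real implementation is
/// structured differently:
///
/// ```
/// # use zerocopy::FromBytes;
/// fn read<T: FromBytes>(bytes: &[u8]) -> Option<T> {
///     if bytes.len() != core::mem::size_of::<T>() {
///         return None;
///     }
///     // SAFETY: `bytes` is valid for reads of `size_of::<T>()` initialized
///     // bytes, `T: FromBytes` guarantees that any such byte sequence is a
///     // valid `T`, and `read_unaligned` imposes no alignment requirement.
///     Some(unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast::<T>()) })
/// }
///
/// let arr: [u8; 4] = read(&[1, 2, 3, 4]).unwrap();
/// assert_eq!(arr, [1, 2, 3, 4]);
/// ```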
1785///
1786#[cfg_attr(
1787 feature = "derive",
1788 doc = "[derive]: zerocopy_derive::FromBytes",
1789 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
1790)]
1791#[cfg_attr(
1792 not(feature = "derive"),
1793 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
1794 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
1795)]
1796pub unsafe trait FromBytes: FromZeroes {
1797 // The `Self: Sized` bound makes it so that `FromBytes` is still object
1798 // safe.
1799 #[doc(hidden)]
1800 fn only_derive_is_allowed_to_implement_this_trait()
1801 where
1802 Self: Sized;
1803
1804 /// Interprets the given `bytes` as a `&Self` without copying.
1805 ///
1806 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1807 /// `align_of::<Self>()`, this returns `None`.
1808 ///
1809 /// # Examples
1810 ///
1811 /// ```
1812 /// use zerocopy::FromBytes;
1813 /// # use zerocopy_derive::*;
1814 ///
1815 /// #[derive(FromZeroes, FromBytes)]
1816 /// #[repr(C)]
1817 /// struct PacketHeader {
1818 /// src_port: [u8; 2],
1819 /// dst_port: [u8; 2],
1820 /// length: [u8; 2],
1821 /// checksum: [u8; 2],
1822 /// }
1823 ///
1824 /// // These bytes encode a `PacketHeader`.
1825 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
1826 ///
1827 /// let header = PacketHeader::ref_from(bytes).unwrap();
1828 ///
1829 /// assert_eq!(header.src_port, [0, 1]);
1830 /// assert_eq!(header.dst_port, [2, 3]);
1831 /// assert_eq!(header.length, [4, 5]);
1832 /// assert_eq!(header.checksum, [6, 7]);
1833 /// ```
1834 #[inline]
1835 fn ref_from(bytes: &[u8]) -> Option<&Self>
1836 where
1837 Self: Sized,
1838 {
1839 Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref)
1840 }
1841
1842 /// Interprets the prefix of the given `bytes` as a `&Self` without copying.
1843 ///
1844 /// `ref_from_prefix` returns a reference to the first `size_of::<Self>()`
1845 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1846 /// aligned to `align_of::<Self>()`, this returns `None`.
1847 ///
1848 /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
1849 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1850 ///
1851 /// # Examples
1852 ///
1853 /// ```
1854 /// use zerocopy::FromBytes;
1855 /// # use zerocopy_derive::*;
1856 ///
1857 /// #[derive(FromZeroes, FromBytes)]
1858 /// #[repr(C)]
1859 /// struct PacketHeader {
1860 /// src_port: [u8; 2],
1861 /// dst_port: [u8; 2],
1862 /// length: [u8; 2],
1863 /// checksum: [u8; 2],
1864 /// }
1865 ///
1866 /// // These are more bytes than are needed to encode a `PacketHeader`.
1867 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1868 ///
1869 /// let header = PacketHeader::ref_from_prefix(bytes).unwrap();
1870 ///
1871 /// assert_eq!(header.src_port, [0, 1]);
1872 /// assert_eq!(header.dst_port, [2, 3]);
1873 /// assert_eq!(header.length, [4, 5]);
1874 /// assert_eq!(header.checksum, [6, 7]);
1875 /// ```
1876 #[inline]
1877 fn ref_from_prefix(bytes: &[u8]) -> Option<&Self>
1878 where
1879 Self: Sized,
1880 {
1881 Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref())
1882 }
1883
1884 /// Interprets the suffix of the given `bytes` as a `&Self` without copying.
1885 ///
1886 /// `ref_from_suffix` returns a reference to the last `size_of::<Self>()`
1887 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
1888 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
1889 ///
1890 /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, use
1891 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1892 ///
1893 /// # Examples
1894 ///
1895 /// ```
1896 /// use zerocopy::FromBytes;
1897 /// # use zerocopy_derive::*;
1898 ///
1899 /// #[derive(FromZeroes, FromBytes)]
1900 /// #[repr(C)]
1901 /// struct PacketTrailer {
1902 /// frame_check_sequence: [u8; 4],
1903 /// }
1904 ///
1905 /// // These are more bytes than are needed to encode a `PacketTrailer`.
1906 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1907 ///
1908 /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap();
1909 ///
1910 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
1911 /// ```
1912 #[inline]
1913 fn ref_from_suffix(bytes: &[u8]) -> Option<&Self>
1914 where
1915 Self: Sized,
1916 {
1917 Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref())
1918 }
1919
1920 /// Interprets the given `bytes` as a `&mut Self` without copying.
1921 ///
1922 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1923 /// `align_of::<Self>()`, this returns `None`.
1924 ///
1925 /// # Examples
1926 ///
1927 /// ```
1928 /// use zerocopy::FromBytes;
1929 /// # use zerocopy_derive::*;
1930 ///
1931 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1932 /// #[repr(C)]
1933 /// struct PacketHeader {
1934 /// src_port: [u8; 2],
1935 /// dst_port: [u8; 2],
1936 /// length: [u8; 2],
1937 /// checksum: [u8; 2],
1938 /// }
1939 ///
1940 /// // These bytes encode a `PacketHeader`.
1941 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
1942 ///
1943 /// let header = PacketHeader::mut_from(bytes).unwrap();
1944 ///
1945 /// assert_eq!(header.src_port, [0, 1]);
1946 /// assert_eq!(header.dst_port, [2, 3]);
1947 /// assert_eq!(header.length, [4, 5]);
1948 /// assert_eq!(header.checksum, [6, 7]);
1949 ///
1950 /// header.checksum = [0, 0];
1951 ///
1952 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
1953 /// ```
1954 #[inline]
1955 fn mut_from(bytes: &mut [u8]) -> Option<&mut Self>
1956 where
1957 Self: Sized + AsBytes,
1958 {
1959 Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut)
1960 }
1961
1962 /// Interprets the prefix of the given `bytes` as a `&mut Self` without
1963 /// copying.
1964 ///
1965 /// `mut_from_prefix` returns a reference to the first `size_of::<Self>()`
1966 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1967 /// aligned to `align_of::<Self>()`, this returns `None`.
1968 ///
1969 /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
1970 /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
1971 ///
1972 /// # Examples
1973 ///
1974 /// ```
1975 /// use zerocopy::FromBytes;
1976 /// # use zerocopy_derive::*;
1977 ///
1978 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1979 /// #[repr(C)]
1980 /// struct PacketHeader {
1981 /// src_port: [u8; 2],
1982 /// dst_port: [u8; 2],
1983 /// length: [u8; 2],
1984 /// checksum: [u8; 2],
1985 /// }
1986 ///
1987 /// // These are more bytes than are needed to encode a `PacketHeader`.
1988 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
1989 ///
1990 /// let header = PacketHeader::mut_from_prefix(bytes).unwrap();
1991 ///
1992 /// assert_eq!(header.src_port, [0, 1]);
1993 /// assert_eq!(header.dst_port, [2, 3]);
1994 /// assert_eq!(header.length, [4, 5]);
1995 /// assert_eq!(header.checksum, [6, 7]);
1996 ///
1997 /// header.checksum = [0, 0];
1998 ///
1999 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]);
2000 /// ```
2001 #[inline]
2002 fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self>
2003 where
2004 Self: Sized + AsBytes,
2005 {
2006 Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut())
2007 }
2008
2009 /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying.
2010 ///
2011 /// `mut_from_suffix` returns a reference to the last `size_of::<Self>()`
2012 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
2013 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
2014 ///
2015 /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then,
2016 /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
2017 ///
2018 /// # Examples
2019 ///
2020 /// ```
2021 /// use zerocopy::FromBytes;
2022 /// # use zerocopy_derive::*;
2023 ///
2024 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2025 /// #[repr(C)]
2026 /// struct PacketTrailer {
2027 /// frame_check_sequence: [u8; 4],
2028 /// }
2029 ///
2030 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2031 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2032 ///
2033 /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap();
2034 ///
2035 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2036 ///
2037 /// trailer.frame_check_sequence = [0, 0, 0, 0];
2038 ///
2039 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2040 /// ```
2041 #[inline]
2042 fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self>
2043 where
2044 Self: Sized + AsBytes,
2045 {
2046 Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut())
2047 }
2048
2049 /// Interprets the given `bytes` as a `&[Self]` without copying.
2050 ///
2051 /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned to
2052 /// `align_of::<Self>()`, this returns `None`.
2053 ///
2054 /// If you need to convert a specific number of slice elements, see
2055 /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or
2056 /// [`slice_from_suffix`](FromBytes::slice_from_suffix).
2057 ///
2058 /// # Panics
2059 ///
2060 /// If `Self` is a zero-sized type.
2061 ///
2062 /// # Examples
2063 ///
2064 /// ```
2065 /// use zerocopy::FromBytes;
2066 /// # use zerocopy_derive::*;
2067 ///
2068 /// # #[derive(Debug, PartialEq, Eq)]
2069 /// #[derive(FromZeroes, FromBytes)]
2070 /// #[repr(C)]
2071 /// struct Pixel {
2072 /// r: u8,
2073 /// g: u8,
2074 /// b: u8,
2075 /// a: u8,
2076 /// }
2077 ///
2078 /// // These bytes encode two `Pixel`s.
2079 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2080 ///
2081 /// let pixels = Pixel::slice_from(bytes).unwrap();
2082 ///
2083 /// assert_eq!(pixels, &[
2084 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2085 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2086 /// ]);
2087 /// ```
2088 #[inline]
2089 fn slice_from(bytes: &[u8]) -> Option<&[Self]>
2090 where
2091 Self: Sized,
2092 {
2093 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice())
2094 }
2095
2096 /// Interprets the prefix of the given `bytes` as a `&[Self]` with length
2097 /// equal to `count` without copying.
2098 ///
2099 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2100 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2101 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2102 /// `&[Self]`, and returns the remaining bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2104 /// If any of the length, alignment, or overflow checks fail, it returns
2105 /// `None`.
2106 ///
2107 /// # Panics
2108 ///
2109 /// If `T` is a zero-sized type.
2110 ///
2111 /// # Examples
2112 ///
2113 /// ```
2114 /// use zerocopy::FromBytes;
2115 /// # use zerocopy_derive::*;
2116 ///
2117 /// # #[derive(Debug, PartialEq, Eq)]
2118 /// #[derive(FromZeroes, FromBytes)]
2119 /// #[repr(C)]
2120 /// struct Pixel {
2121 /// r: u8,
2122 /// g: u8,
2123 /// b: u8,
2124 /// a: u8,
2125 /// }
2126 ///
2127 /// // These are more bytes than are needed to encode two `Pixel`s.
2128 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2129 ///
2130 /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap();
2131 ///
2132 /// assert_eq!(pixels, &[
2133 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2134 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2135 /// ]);
2136 ///
2137 /// assert_eq!(rest, &[8, 9]);
2138 /// ```
2139 #[inline]
2140 fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])>
2141 where
2142 Self: Sized,
2143 {
2144 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b))
2145 }
2146
2147 /// Interprets the suffix of the given `bytes` as a `&[Self]` with length
2148 /// equal to `count` without copying.
2149 ///
2150 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2151 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2152 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2153 /// `&[Self]`, and returns the preceding bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2155 /// If any of the length, alignment, or overflow checks fail, it returns
2156 /// `None`.
2157 ///
2158 /// # Panics
2159 ///
2160 /// If `T` is a zero-sized type.
2161 ///
2162 /// # Examples
2163 ///
2164 /// ```
2165 /// use zerocopy::FromBytes;
2166 /// # use zerocopy_derive::*;
2167 ///
2168 /// # #[derive(Debug, PartialEq, Eq)]
2169 /// #[derive(FromZeroes, FromBytes)]
2170 /// #[repr(C)]
2171 /// struct Pixel {
2172 /// r: u8,
2173 /// g: u8,
2174 /// b: u8,
2175 /// a: u8,
2176 /// }
2177 ///
2178 /// // These are more bytes than are needed to encode two `Pixel`s.
2179 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2180 ///
2181 /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap();
2182 ///
2183 /// assert_eq!(rest, &[0, 1]);
2184 ///
2185 /// assert_eq!(pixels, &[
2186 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2187 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2188 /// ]);
2189 /// ```
2190 #[inline]
2191 fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])>
2192 where
2193 Self: Sized,
2194 {
2195 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice()))
2196 }
2197
2198 /// Interprets the given `bytes` as a `&mut [Self]` without copying.
2199 ///
2200 /// If `bytes.len() % size_of::<T>() != 0` or `bytes` is not aligned to
2201 /// `align_of::<T>()`, this returns `None`.
2202 ///
2203 /// If you need to convert a specific number of slice elements, see
2204 /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or
2205 /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix).
2206 ///
2207 /// # Panics
2208 ///
2209 /// If `T` is a zero-sized type.
2210 ///
2211 /// # Examples
2212 ///
2213 /// ```
2214 /// use zerocopy::FromBytes;
2215 /// # use zerocopy_derive::*;
2216 ///
2217 /// # #[derive(Debug, PartialEq, Eq)]
2218 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2219 /// #[repr(C)]
2220 /// struct Pixel {
2221 /// r: u8,
2222 /// g: u8,
2223 /// b: u8,
2224 /// a: u8,
2225 /// }
2226 ///
2227 /// // These bytes encode two `Pixel`s.
2228 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
2229 ///
2230 /// let pixels = Pixel::mut_slice_from(bytes).unwrap();
2231 ///
2232 /// assert_eq!(pixels, &[
2233 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2234 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2235 /// ]);
2236 ///
2237 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2238 ///
2239 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
2240 /// ```
2241 #[inline]
2242 fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]>
2243 where
2244 Self: Sized + AsBytes,
2245 {
2246 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice())
2247 }
2248
2249 /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length
2250 /// equal to `count` without copying.
2251 ///
2252 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2253 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2254 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2255 /// `&[Self]`, and returns the remaining bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2257 /// If any of the length, alignment, or overflow checks fail, it returns
2258 /// `None`.
2259 ///
2260 /// # Panics
2261 ///
2262 /// If `T` is a zero-sized type.
2263 ///
2264 /// # Examples
2265 ///
2266 /// ```
2267 /// use zerocopy::FromBytes;
2268 /// # use zerocopy_derive::*;
2269 ///
2270 /// # #[derive(Debug, PartialEq, Eq)]
2271 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2272 /// #[repr(C)]
2273 /// struct Pixel {
2274 /// r: u8,
2275 /// g: u8,
2276 /// b: u8,
2277 /// a: u8,
2278 /// }
2279 ///
2280 /// // These are more bytes than are needed to encode two `Pixel`s.
2281 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2282 ///
2283 /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap();
2284 ///
2285 /// assert_eq!(pixels, &[
2286 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2287 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2288 /// ]);
2289 ///
2290 /// assert_eq!(rest, &[8, 9]);
2291 ///
2292 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2293 ///
2294 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]);
2295 /// ```
2296 #[inline]
2297 fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
2298 where
2299 Self: Sized + AsBytes,
2300 {
2301 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b))
2302 }
2303
2304 /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length
2305 /// equal to `count` without copying.
2306 ///
2307 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2308 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2309 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2310 /// `&[Self]`, and returns the preceding bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2312 /// If any of the length, alignment, or overflow checks fail, it returns
2313 /// `None`.
2314 ///
2315 /// # Panics
2316 ///
2317 /// If `T` is a zero-sized type.
2318 ///
2319 /// # Examples
2320 ///
2321 /// ```
2322 /// use zerocopy::FromBytes;
2323 /// # use zerocopy_derive::*;
2324 ///
2325 /// # #[derive(Debug, PartialEq, Eq)]
2326 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2327 /// #[repr(C)]
2328 /// struct Pixel {
2329 /// r: u8,
2330 /// g: u8,
2331 /// b: u8,
2332 /// a: u8,
2333 /// }
2334 ///
2335 /// // These are more bytes than are needed to encode two `Pixel`s.
2336 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2337 ///
2338 /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap();
2339 ///
2340 /// assert_eq!(rest, &[0, 1]);
2341 ///
2342 /// assert_eq!(pixels, &[
2343 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2344 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2345 /// ]);
2346 ///
2347 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2348 ///
2349 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2350 /// ```
2351 #[inline]
2352 fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
2353 where
2354 Self: Sized + AsBytes,
2355 {
2356 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice()))
2357 }
2358
2359 /// Reads a copy of `Self` from `bytes`.
2360 ///
2361 /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
2362 ///
2363 /// # Examples
2364 ///
2365 /// ```
2366 /// use zerocopy::FromBytes;
2367 /// # use zerocopy_derive::*;
2368 ///
2369 /// #[derive(FromZeroes, FromBytes)]
2370 /// #[repr(C)]
2371 /// struct PacketHeader {
2372 /// src_port: [u8; 2],
2373 /// dst_port: [u8; 2],
2374 /// length: [u8; 2],
2375 /// checksum: [u8; 2],
2376 /// }
2377 ///
2378 /// // These bytes encode a `PacketHeader`.
2379 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2380 ///
2381 /// let header = PacketHeader::read_from(bytes).unwrap();
2382 ///
2383 /// assert_eq!(header.src_port, [0, 1]);
2384 /// assert_eq!(header.dst_port, [2, 3]);
2385 /// assert_eq!(header.length, [4, 5]);
2386 /// assert_eq!(header.checksum, [6, 7]);
2387 /// ```
2388 #[inline]
2389 fn read_from(bytes: &[u8]) -> Option<Self>
2390 where
2391 Self: Sized,
2392 {
2393 Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner())
2394 }
2395
2396 /// Reads a copy of `Self` from the prefix of `bytes`.
2397 ///
2398 /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
2399 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2400 /// `None`.
2401 ///
2402 /// # Examples
2403 ///
2404 /// ```
2405 /// use zerocopy::FromBytes;
2406 /// # use zerocopy_derive::*;
2407 ///
2408 /// #[derive(FromZeroes, FromBytes)]
2409 /// #[repr(C)]
2410 /// struct PacketHeader {
2411 /// src_port: [u8; 2],
2412 /// dst_port: [u8; 2],
2413 /// length: [u8; 2],
2414 /// checksum: [u8; 2],
2415 /// }
2416 ///
2417 /// // These are more bytes than are needed to encode a `PacketHeader`.
2418 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2419 ///
2420 /// let header = PacketHeader::read_from_prefix(bytes).unwrap();
2421 ///
2422 /// assert_eq!(header.src_port, [0, 1]);
2423 /// assert_eq!(header.dst_port, [2, 3]);
2424 /// assert_eq!(header.length, [4, 5]);
2425 /// assert_eq!(header.checksum, [6, 7]);
2426 /// ```
2427 #[inline]
2428 fn read_from_prefix(bytes: &[u8]) -> Option<Self>
2429 where
2430 Self: Sized,
2431 {
2432 Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)
2433 .map(|(r, _)| r.read().into_inner())
2434 }
2435
2436 /// Reads a copy of `Self` from the suffix of `bytes`.
2437 ///
2438 /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
2439 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2440 /// `None`.
2441 ///
2442 /// # Examples
2443 ///
2444 /// ```
2445 /// use zerocopy::FromBytes;
2446 /// # use zerocopy_derive::*;
2447 ///
2448 /// #[derive(FromZeroes, FromBytes)]
2449 /// #[repr(C)]
2450 /// struct PacketTrailer {
2451 /// frame_check_sequence: [u8; 4],
2452 /// }
2453 ///
2454 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2455 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2456 ///
2457 /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap();
2458 ///
2459 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2460 /// ```
2461 #[inline]
2462 fn read_from_suffix(bytes: &[u8]) -> Option<Self>
2463 where
2464 Self: Sized,
2465 {
2466 Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)
2467 .map(|(_, r)| r.read().into_inner())
2468 }
2469}
2470
2471/// Analyzes whether a type is [`AsBytes`].
2472///
2473/// This derive analyzes, at compile time, whether the annotated type satisfies
2474/// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is
2475/// sound to do so. This derive can be applied to structs, enums, and unions;
2476/// e.g.:
2477///
2478/// ```
2479/// # use zerocopy_derive::{AsBytes};
2480/// #[derive(AsBytes)]
2481/// #[repr(C)]
2482/// struct MyStruct {
2483/// # /*
2484/// ...
2485/// # */
2486/// }
2487///
2488/// #[derive(AsBytes)]
2489/// #[repr(u8)]
2490/// enum MyEnum {
2491/// # Variant,
2492/// # /*
2493/// ...
2494/// # */
2495/// }
2496///
2497/// #[derive(AsBytes)]
2498/// #[repr(C)]
2499/// union MyUnion {
2500/// # variant: u8,
2501/// # /*
2502/// ...
2503/// # */
2504/// }
2505/// ```
2506///
2507/// [safety conditions]: trait@AsBytes#safety
2508///
2509/// # Error Messages
2510///
2511/// Due to the way that the custom derive for `AsBytes` is implemented, you may
2512/// get an error like this:
2513///
2514/// ```text
2515/// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied
2516/// --> lib.rs:23:10
2517/// |
2518/// 1 | #[derive(AsBytes)]
2519/// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>`
2520/// |
2521/// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>`
2522/// ```
2523///
2524/// This error indicates that the type being annotated has padding bytes, which
2525/// is illegal for `AsBytes` types. Consider reducing the alignment of some
2526/// fields by using types in the [`byteorder`] module, adding explicit struct
2527/// fields where those padding bytes would be, or using `#[repr(packed)]`. See
2528/// the Rust Reference's page on [type layout] for more information
2529/// about type layout and padding.
2530///
2531/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
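///
/// For example, here is a hedged sketch of the "explicit padding fields" fix
/// (`Foo` is a hypothetical type): `#[repr(C)]` would otherwise insert one
/// trailing padding byte after `b`, so that byte is named explicitly instead:
///
/// ```
/// # use zerocopy_derive::AsBytes;
/// #[derive(AsBytes)]
/// #[repr(C)]
/// struct Foo {
///     a: u16,
///     b: u8,
///     // Without this field, `Foo` would have one byte of trailing padding,
///     // and the derive would reject it.
///     _padding: u8,
/// }
/// ```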
2532///
2533/// # Analysis
2534///
2535/// *This section describes, roughly, the analysis performed by this derive to
2536/// determine whether it is sound to implement `AsBytes` for a given type.
2537/// Unless you are modifying the implementation of this derive, or attempting to
2538/// manually implement `AsBytes` for a type yourself, you don't need to read
2539/// this section.*
2540///
2541/// If a type has the following properties, then this derive can implement
2542/// `AsBytes` for that type:
2543///
2544/// - If the type is a struct:
2545/// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
2546/// or `repr(packed)`).
2547/// - All of its fields must be `AsBytes`.
2548/// - Its layout must have no padding. This is always true for
2549/// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
2550/// algorithm described in the [Rust Reference].
2551/// - If the type is an enum:
2552/// - It must be a C-like enum (meaning that all variants have no fields).
2553/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
2554/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
2555/// - The type must not contain any [`UnsafeCell`]s (this is required in order
2556/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
2557/// memory). The type may contain references or pointers to `UnsafeCell`s so
/// long as those values can themselves be converted to byte sequences
/// (`AsBytes` is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`,
/// but it could be one day).
2561///
2562/// [`UnsafeCell`]: core::cell::UnsafeCell
2563///
2564/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `AsBytes`, and must *not* rely on the
2566/// implementation details of this derive.
2567///
2568/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
2569#[cfg(any(feature = "derive", test))]
2570#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
2571pub use zerocopy_derive::AsBytes;
2572
2573/// Types that can be viewed as an immutable slice of initialized bytes.
2574///
2575/// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same
2576/// size. This is useful for efficiently serializing structured data as raw
2577/// bytes.
2578///
2579/// # Implementation
2580///
2581/// **Do not implement this trait yourself!** Instead, use
2582/// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.:
2583///
2584/// ```
2585/// # use zerocopy_derive::AsBytes;
2586/// #[derive(AsBytes)]
2587/// #[repr(C)]
2588/// struct MyStruct {
2589/// # /*
2590/// ...
2591/// # */
2592/// }
2593///
2594/// #[derive(AsBytes)]
2595/// #[repr(u8)]
2596/// enum MyEnum {
2597/// # Variant0,
2598/// # /*
2599/// ...
2600/// # */
2601/// }
2602///
2603/// #[derive(AsBytes)]
2604/// #[repr(C)]
2605/// union MyUnion {
2606/// # variant: u8,
2607/// # /*
2608/// ...
2609/// # */
2610/// }
2611/// ```
2612///
2613/// This derive performs a sophisticated, compile-time safety analysis to
2614/// determine whether a type is `AsBytes`. See the [derive
2615/// documentation][derive] for guidance on how to interpret error messages
2616/// produced by the derive's analysis.
2617///
2618/// # Safety
2619///
2620/// *This section describes what is required in order for `T: AsBytes`, and
2621/// what unsafe code may assume of such types. If you don't plan on implementing
2622/// `AsBytes` manually, and you don't plan on writing unsafe code that
2623/// operates on `AsBytes` types, then you don't need to read this section.*
2624///
2625/// If `T: AsBytes`, then unsafe code may assume that:
2626/// - It is sound to treat any `t: T` as an immutable `[u8]` of length
2627/// `size_of_val(t)`.
2628/// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() ==
2629/// size_of_val(t)` at the same address as `t`, and it is sound for both `b`
2630/// and `t` to be live at the same time.
2631///
2632/// If a type is marked as `AsBytes` which violates this contract, it may cause
2633/// undefined behavior.
2634///
2635/// `#[derive(AsBytes)]` only permits [types which satisfy these
2636/// requirements][derive-analysis].
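///
/// As an illustration of the second guarantee above, here is a hedged sketch
/// of how unsafe code might rely on `T: AsBytes`; this mirrors how
/// [`AsBytes::as_bytes`] is implemented:
///
/// ```
/// # use zerocopy::AsBytes;
/// fn bytes_of<T: AsBytes + ?Sized>(t: &T) -> &[u8] {
///     let len = core::mem::size_of_val(t);
///     let ptr: *const T = t;
///     // SAFETY: `T: AsBytes` guarantees that all `len` bytes of `*t` are
///     // initialized and that `T` contains no `UnsafeCell`s, so the bytes
///     // cannot be mutated while the returned slice is live.
///     unsafe { core::slice::from_raw_parts(ptr.cast::<u8>(), len) }
/// }
///
/// let arr = [1u8, 2, 3];
/// assert_eq!(bytes_of(&arr), [1, 2, 3]);
/// ```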
2637///
2638#[cfg_attr(
2639 feature = "derive",
2640 doc = "[derive]: zerocopy_derive::AsBytes",
2641 doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis"
2642)]
2643#[cfg_attr(
2644 not(feature = "derive"),
2645 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"),
2646 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"),
2647)]
2648pub unsafe trait AsBytes {
2649 // The `Self: Sized` bound makes it so that this function doesn't prevent
2650 // `AsBytes` from being object safe. Note that other `AsBytes` methods
2651 // prevent object safety, but those provide a benefit in exchange for object
2652 // safety. If at some point we remove those methods, change their type
2653 // signatures, or move them out of this trait so that `AsBytes` is object
2654 // safe again, it's important that this function not prevent object safety.
2655 #[doc(hidden)]
2656 fn only_derive_is_allowed_to_implement_this_trait()
2657 where
2658 Self: Sized;
2659
2660 /// Gets the bytes of this value.
2661 ///
2662 /// `as_bytes` provides access to the bytes of this value as an immutable
2663 /// byte slice.
2664 ///
2665 /// # Examples
2666 ///
2667 /// ```
2668 /// use zerocopy::AsBytes;
2669 /// # use zerocopy_derive::*;
2670 ///
2671 /// #[derive(AsBytes)]
2672 /// #[repr(C)]
2673 /// struct PacketHeader {
2674 /// src_port: [u8; 2],
2675 /// dst_port: [u8; 2],
2676 /// length: [u8; 2],
2677 /// checksum: [u8; 2],
2678 /// }
2679 ///
2680 /// let header = PacketHeader {
2681 /// src_port: [0, 1],
2682 /// dst_port: [2, 3],
2683 /// length: [4, 5],
2684 /// checksum: [6, 7],
2685 /// };
2686 ///
2687 /// let bytes = header.as_bytes();
2688 ///
2689 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2690 /// ```
2691 #[inline(always)]
2692 fn as_bytes(&self) -> &[u8] {
2693 // Note that this method does not have a `Self: Sized` bound;
2694 // `size_of_val` works for unsized values too.
2695 let len = mem::size_of_val(self);
2696 let slf: *const Self = self;
2697
2698 // SAFETY:
2699 // - `slf.cast::<u8>()` is valid for reads for `len *
2700 // mem::size_of::<u8>()` many bytes because...
2701 // - `slf` is the same pointer as `self`, and `self` is a reference
2702 // which points to an object whose size is `len`. Thus...
2703 // - The entire region of `len` bytes starting at `slf` is contained
2704 // within a single allocation.
2705 // - `slf` is non-null.
2706 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2707 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2708 // initialized.
2709 // - Since `slf` is derived from `self`, and `self` is an immutable
2710 // reference, the only other references to this memory region that
2711 // could exist are other immutable references, and those don't allow
2712 // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s,
2713 // which are the only types for which this rule wouldn't be sufficient.
2714 // - The total size of the resulting slice is no larger than
2715 // `isize::MAX` because no allocation produced by safe code can be
2716 // larger than `isize::MAX`.
2717 //
2718 // TODO(#429): Add references to docs and quotes.
2719 unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
2720 }
2721
2722 /// Gets the bytes of this value mutably.
2723 ///
2724 /// `as_bytes_mut` provides access to the bytes of this value as a mutable
2725 /// byte slice.
2726 ///
2727 /// # Examples
2728 ///
2729 /// ```
2730 /// use zerocopy::AsBytes;
2731 /// # use zerocopy_derive::*;
2732 ///
2733 /// # #[derive(Eq, PartialEq, Debug)]
2734 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2735 /// #[repr(C)]
2736 /// struct PacketHeader {
2737 /// src_port: [u8; 2],
2738 /// dst_port: [u8; 2],
2739 /// length: [u8; 2],
2740 /// checksum: [u8; 2],
2741 /// }
2742 ///
2743 /// let mut header = PacketHeader {
2744 /// src_port: [0, 1],
2745 /// dst_port: [2, 3],
2746 /// length: [4, 5],
2747 /// checksum: [6, 7],
2748 /// };
2749 ///
2750 /// let bytes = header.as_bytes_mut();
2751 ///
2752 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2753 ///
2754 /// bytes.reverse();
2755 ///
2756 /// assert_eq!(header, PacketHeader {
2757 /// src_port: [7, 6],
2758 /// dst_port: [5, 4],
2759 /// length: [3, 2],
2760 /// checksum: [1, 0],
2761 /// });
2762 /// ```
2763 #[inline(always)]
2764 fn as_bytes_mut(&mut self) -> &mut [u8]
2765 where
2766 Self: FromBytes,
2767 {
2768 // Note that this method does not have a `Self: Sized` bound;
2769 // `size_of_val` works for unsized values too.
2770 let len = mem::size_of_val(self);
2771 let slf: *mut Self = self;
2772
2773 // SAFETY:
2774 // - `slf.cast::<u8>()` is valid for reads and writes for `len *
2775 // mem::size_of::<u8>()` many bytes because...
2776 // - `slf` is the same pointer as `self`, and `self` is a reference
2777 // which points to an object whose size is `len`. Thus...
2778 // - The entire region of `len` bytes starting at `slf` is contained
2779 // within a single allocation.
2780 // - `slf` is non-null.
2781 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2782 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2783 // initialized.
2784 // - `Self: FromBytes` ensures that no write to this memory region
2785 // could result in it containing an invalid `Self`.
2786 // - Since `slf` is derived from `self`, and `self` is a mutable
2787 // reference, no other references to this memory region can exist.
2788 // - The total size of the resulting slice is no larger than
2789 // `isize::MAX` because no allocation produced by safe code can be
2790 // larger than `isize::MAX`.
2791 //
2792 // TODO(#429): Add references to docs and quotes.
2793 unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
2794 }
2795
2796 /// Writes a copy of `self` to `bytes`.
2797 ///
2798 /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
2799 ///
2800 /// # Examples
2801 ///
2802 /// ```
2803 /// use zerocopy::AsBytes;
2804 /// # use zerocopy_derive::*;
2805 ///
2806 /// #[derive(AsBytes)]
2807 /// #[repr(C)]
2808 /// struct PacketHeader {
2809 /// src_port: [u8; 2],
2810 /// dst_port: [u8; 2],
2811 /// length: [u8; 2],
2812 /// checksum: [u8; 2],
2813 /// }
2814 ///
2815 /// let header = PacketHeader {
2816 /// src_port: [0, 1],
2817 /// dst_port: [2, 3],
2818 /// length: [4, 5],
2819 /// checksum: [6, 7],
2820 /// };
2821 ///
2822 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
2823 ///
2824 /// header.write_to(&mut bytes[..]);
2825 ///
2826 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2827 /// ```
2828 ///
2829 /// If too many or too few target bytes are provided, `write_to` returns
2830 /// `None` and leaves the target bytes unmodified:
2831 ///
2832 /// ```
2833 /// # use zerocopy::AsBytes;
2834 /// # let header = u128::MAX;
2835 /// let mut excessive_bytes = &mut [0u8; 128][..];
2836 ///
2837 /// let write_result = header.write_to(excessive_bytes);
2838 ///
2839 /// assert!(write_result.is_none());
2840 /// assert_eq!(excessive_bytes, [0u8; 128]);
2841 /// ```
2842 #[inline]
2843 fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
2844 if bytes.len() != mem::size_of_val(self) {
2845 return None;
2846 }
2847
2848 bytes.copy_from_slice(self.as_bytes());
2849 Some(())
2850 }
2851
2852 /// Writes a copy of `self` to the prefix of `bytes`.
2853 ///
2854 /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
2855 /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
2856 ///
2857 /// # Examples
2858 ///
2859 /// ```
2860 /// use zerocopy::AsBytes;
2861 /// # use zerocopy_derive::*;
2862 ///
2863 /// #[derive(AsBytes)]
2864 /// #[repr(C)]
2865 /// struct PacketHeader {
2866 /// src_port: [u8; 2],
2867 /// dst_port: [u8; 2],
2868 /// length: [u8; 2],
2869 /// checksum: [u8; 2],
2870 /// }
2871 ///
2872 /// let header = PacketHeader {
2873 /// src_port: [0, 1],
2874 /// dst_port: [2, 3],
2875 /// length: [4, 5],
2876 /// checksum: [6, 7],
2877 /// };
2878 ///
2879 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
2880 ///
2881 /// header.write_to_prefix(&mut bytes[..]);
2882 ///
2883 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
2884 /// ```
2885 ///
2886 /// If insufficient target bytes are provided, `write_to_prefix` returns
2887 /// `None` and leaves the target bytes unmodified:
2888 ///
2889 /// ```
2890 /// # use zerocopy::AsBytes;
2891 /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
2898 /// ```
2899 #[inline]
2900 fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
2901 let size = mem::size_of_val(self);
2902 bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
2903 Some(())
2904 }
2905
2906 /// Writes a copy of `self` to the suffix of `bytes`.
2907 ///
2908 /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
2909 /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
2910 ///
2911 /// # Examples
2912 ///
2913 /// ```
2914 /// use zerocopy::AsBytes;
2915 /// # use zerocopy_derive::*;
2916 ///
2917 /// #[derive(AsBytes)]
2918 /// #[repr(C)]
2919 /// struct PacketHeader {
2920 /// src_port: [u8; 2],
2921 /// dst_port: [u8; 2],
2922 /// length: [u8; 2],
2923 /// checksum: [u8; 2],
2924 /// }
2925 ///
2926 /// let header = PacketHeader {
2927 /// src_port: [0, 1],
2928 /// dst_port: [2, 3],
2929 /// length: [4, 5],
2930 /// checksum: [6, 7],
2931 /// };
2932 ///
2933 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
2934 ///
2935 /// header.write_to_suffix(&mut bytes[..]);
2936 ///
2937 /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
2945 /// ```
2946 ///
2947 /// If insufficient target bytes are provided, `write_to_suffix` returns
2948 /// `None` and leaves the target bytes unmodified:
2949 ///
2950 /// ```
2951 /// # use zerocopy::AsBytes;
2952 /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
2959 /// ```
2960 #[inline]
2961 fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
2962 let start = bytes.len().checked_sub(mem::size_of_val(self))?;
2963 bytes
2964 .get_mut(start..)
2965 .expect("`start` should be in-bounds of `bytes`")
2966 .copy_from_slice(self.as_bytes());
2967 Some(())
2968 }
2969}
2970
2971/// Types with no alignment requirement.
2972///
2973/// WARNING: Do not implement this trait yourself! Instead, use
2974/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature).
2975///
2976/// If `T: Unaligned`, then `align_of::<T>() == 1`.
2977///
2978/// # Safety
2979///
2980/// *This section describes what is required in order for `T: Unaligned`, and
2981/// what unsafe code may assume of such types. `#[derive(Unaligned)]` only
2982/// permits types which satisfy these requirements. If you don't plan on
2983/// implementing `Unaligned` manually, and you don't plan on writing unsafe code
2984/// that operates on `Unaligned` types, then you don't need to read this
2985/// section.*
2986///
2987/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
2988/// reference to `T` at any memory location regardless of alignment. If a type
2989/// is marked as `Unaligned` which violates this contract, it may cause
2990/// undefined behavior.
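///
/// # Examples
///
/// For illustration, a `#[repr(C)]` struct whose fields are all `Unaligned`
/// can itself derive `Unaligned` (the struct shown here is arbitrary):
///
/// ```
/// # use zerocopy_derive::*;
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct DnsHeaderFlags {
///     // Every field has alignment 1, so the struct does too.
///     flags: u8,
///     rcode: u8,
/// }
///
/// assert_eq!(core::mem::align_of::<DnsHeaderFlags>(), 1);
/// ```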
2991pub unsafe trait Unaligned {
2992 // The `Self: Sized` bound makes it so that `Unaligned` is still object
2993 // safe.
2994 #[doc(hidden)]
2995 fn only_derive_is_allowed_to_implement_this_trait()
2996 where
2997 Self: Sized;
2998}
2999
3000safety_comment! {
3001 /// SAFETY:
3002 /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a
3003 /// zero-sized type to have a size of 0 and an alignment of 1."
3004 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
3005 /// is only one possible sequence of 0 bytes, and `()` is inhabited.
3006 /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes.
3007 /// - `Unaligned`: `()` has alignment 1.
3008 ///
3009 /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout
3010 unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3011 assert_unaligned!(());
3012}
3013
3014safety_comment! {
3015 /// SAFETY:
3016 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit
3017 /// patterns are valid for numeric types [1]
3018 /// - `AsBytes`: numeric types have no padding bytes [1]
3019 /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size
3020 /// of `u8` and `i8` as 1 byte. We also know that:
3021 /// - Alignment is >= 1 [3]
3022 /// - Size is an integer multiple of alignment [4]
3023 /// - The only value >= 1 for which 1 is an integer multiple is 1
3024 /// Therefore, the only possible alignment for `u8` and `i8` is 1.
3025 ///
3026 /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity:
3027 ///
3028 /// For every numeric type, `T`, the bit validity of `T` is equivalent to
3029 /// the bit validity of `[u8; size_of::<T>()]`. An uninitialized byte is
3030 /// not a valid `u8`.
3031 ///
3032 /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text
3033 /// is available on the Stable docs, cite those instead.
3034 ///
3035 /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout
3036 ///
3037 /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3038 ///
3039 /// Alignment is measured in bytes, and must be at least 1.
3040 ///
3041 /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3042 ///
3043 /// The size of a value is always a multiple of its alignment.
3044 ///
3045 /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather
3046 /// than bits or bytes, update this comment, especially the reference to
3047 /// [1].
3048 unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3049 unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3050 assert_unaligned!(u8, i8);
3051 unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3052 unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3053 unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3054 unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3055 unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3056 unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3057 unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3058 unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3059 unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3060 unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3061 unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3062 unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3063}
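
// For illustration (a sketch, not compiled as part of this file): the
// `FromBytes` impls above are what allow a primitive to be read directly out
// of a byte buffer via `FromBytes::read_from`:
//
// ```rust
// use zerocopy::FromBytes;
//
// let bytes = [0x01u8, 0x02, 0x03, 0x04];
// let n = u32::read_from(&bytes[..]).unwrap();
// assert_eq!(n, u32::from_ne_bytes(bytes));
// ```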
3064
3065safety_comment! {
3066 /// SAFETY:
3067 /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern
3068 /// 0x00" [1].
3069 /// - `AsBytes`: Since "the boolean type has a size and alignment of 1 each"
3070 /// and "The value false has the bit pattern 0x00 and the value true has
3071 /// the bit pattern 0x01" [1]. Thus, the only byte of the bool is always
3072 /// initialized.
3073 /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type
3074 /// has a size and alignment of 1 each."
3075 ///
3076 /// [1] https://doc.rust-lang.org/reference/types/boolean.html
3077 unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
3078 assert_unaligned!(bool);
3079 /// SAFETY:
3080 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3081 /// closure:
    ///     - Given `t: *mut bool` and `r: *mut u8`, `r` refers to an object
3083 /// of the same size as that referred to by `t`. This is true because
3084 /// `bool` and `u8` have the same size (1 byte) [1].
3085 /// - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>`
3086 /// which satisfies the preconditions of
3087 /// `TryFromBytes::<bool>::is_bit_valid`, it must be guaranteed that the
3088 /// memory referenced by that `Ptr` always contains a valid `u8`. Since
3089 /// `bool`'s single byte is always initialized, `is_bit_valid`'s
3090 /// precondition requires that the same is true of its argument. Since
3091 /// `u8`'s only bit validity invariant is that its single byte must be
3092 /// initialized, this memory is guaranteed to contain a valid `u8`.
3093 /// - The alignment of `bool` is equal to the alignment of `u8`. [1] [2]
3094 /// - The impl must only return `true` for its argument if the original
3095 /// `Ptr<bool>` refers to a valid `bool`. We only return true if the
3096 /// `u8` value is 0 or 1, and both of these are valid values for `bool`.
3097 /// [3]
3098 ///
3099 /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout:
3100 ///
3101 /// The size of most primitives is given in this table.
3102 ///
3103 /// | Type | `size_of::<Type>() ` |
3104 /// |-----------|----------------------|
3105 /// | `bool` | 1 |
3106 /// | `u8`/`i8` | 1 |
3107 ///
3108 /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3109 ///
3110 /// The size of a value is always a multiple of its alignment.
3111 ///
3112 /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html:
3113 ///
3114 /// The value false has the bit pattern 0x00 and the value true has the
3115 /// bit pattern 0x01.
3116 unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2);
3117}
3118safety_comment! {
3119 /// SAFETY:
3120 /// - `FromZeroes`: Per reference [1], "[a] value of type char is a Unicode
3121 /// scalar value (i.e. a code point that is not a surrogate), represented
3122 /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to
3123 /// 0x10FFFF range" which contains 0x0000.
3124 /// - `AsBytes`: `char` is per reference [1] "represented as a 32-bit
3125 /// unsigned word" (`u32`) which is `AsBytes`. Note that unlike `u32`, not
3126 /// all bit patterns are valid for `char`.
3127 ///
3128 /// [1] https://doc.rust-lang.org/reference/types/textual.html
3129 unsafe_impl!(char: FromZeroes, AsBytes);
3130 /// SAFETY:
3131 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3132 /// closure:
    ///     - Given `t: *mut char` and `r: *mut u32`, `r` refers to an object
3134 /// of the same size as that referred to by `t`. This is true because
3135 /// `char` and `u32` have the same size [1].
3136 /// - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>`
3137 /// which satisfies the preconditions of
3138 /// `TryFromBytes::<char>::is_bit_valid`, it must be guaranteed that the
3139 /// memory referenced by that `Ptr` always contains a valid `u32`. Since
3140 /// `char`'s bytes are always initialized [2], `is_bit_valid`'s
3141 /// precondition requires that the same is true of its argument. Since
3142 /// `u32`'s only bit validity invariant is that its bytes must be
3143 /// initialized, this memory is guaranteed to contain a valid `u32`.
3144 /// - The alignment of `char` is equal to the alignment of `u32`. [1]
3145 /// - The impl must only return `true` for its argument if the original
3146 /// `Ptr<char>` refers to a valid `char`. `char::from_u32` guarantees
3147 /// that it returns `None` if its input is not a valid `char`. [3]
3148 ///
3149 /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity:
3150 ///
3151 /// `char` is guaranteed to have the same size and alignment as `u32` on
3152 /// all platforms.
3153 ///
3154 /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
3155 ///
3156 /// Every byte of a `char` is guaranteed to be initialized.
3157 ///
3158 /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
3159 ///
3160 /// `from_u32()` will return `None` if the input is not a valid value for
3161 /// a `char`.
3162 unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some());
3163}
3164safety_comment! {
3165 /// SAFETY:
3166 /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str`
3167 /// has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`,
3168 /// and `Unaligned`.
3169 ///
3170 /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!`
3171 /// uses `align_of`, which only works for `Sized` types.
3172 ///
3173 /// TODO(#429): Add quotes from documentation.
3174 ///
3175 /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout
3176 unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
3177 /// SAFETY:
3178 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3179 /// closure:
    ///     - Given `t: *mut str` and `r: *mut [u8]`, `r` refers to an object
3181 /// of the same size as that referred to by `t`. This is true because
3182 /// `str` and `[u8]` have the same representation. [1]
3183 /// - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>`
3184 /// which satisfies the preconditions of
3185 /// `TryFromBytes::<str>::is_bit_valid`, it must be guaranteed that the
3186 /// memory referenced by that `Ptr` always contains a valid `[u8]`.
3187 /// Since `str`'s bytes are always initialized [1], `is_bit_valid`'s
3188 /// precondition requires that the same is true of its argument. Since
3189 /// `[u8]`'s only bit validity invariant is that its bytes must be
3190 /// initialized, this memory is guaranteed to contain a valid `[u8]`.
3191 /// - The alignment of `str` is equal to the alignment of `[u8]`. [1]
3192 /// - The impl must only return `true` for its argument if the original
3193 /// `Ptr<str>` refers to a valid `str`. `str::from_utf8` guarantees that
3194 /// it returns `Err` if its input is not a valid `str`. [2]
3195 ///
3196 /// [1] Per https://doc.rust-lang.org/reference/types/textual.html:
3197 ///
    ///   A value of type `str` is represented the same way as `[u8]`.
3199 ///
3200 /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors:
3201 ///
3202 /// Returns `Err` if the slice is not UTF-8.
3203 unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok());
3204}
3205
3206safety_comment! {
3207 // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`.
3208 //
3209 /// SAFETY:
3210 /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated
3211 /// primitive. Since it is the same size, this guarantees it has no
3212 /// padding - integers have no padding, and there's no room for padding
3213 /// if it can represent all of the same values except 0.
3214 /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
3215 /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
3216 /// This is worded in a way that makes it unclear whether it's meant as a
3217 /// guarantee, but given the purpose of those types, it's virtually
3218 /// unthinkable that that would ever change. `Option` cannot be smaller
    ///   than its contained type, which implies that `NonZeroU8` and
    ///   `NonZeroI8` are each of size 0 or 1. `NonZeroX8` can represent
    ///   multiple states, so it cannot be of size 0, which means that it must
    ///   be of size 1. The only valid
3222 /// alignment for a 1-byte type is 1.
3223 ///
3224 /// TODO(#429): Add quotes from documentation.
3225 ///
3226 /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
3227 /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
3228 /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
3229 /// that layout is the same as primitive layout.
3230 unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
3231 unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
3232 assert_unaligned!(NonZeroU8, NonZeroI8);
3233 unsafe_impl!(NonZeroU16: AsBytes);
3234 unsafe_impl!(NonZeroI16: AsBytes);
3235 unsafe_impl!(NonZeroU32: AsBytes);
3236 unsafe_impl!(NonZeroI32: AsBytes);
3237 unsafe_impl!(NonZeroU64: AsBytes);
3238 unsafe_impl!(NonZeroI64: AsBytes);
3239 unsafe_impl!(NonZeroU128: AsBytes);
3240 unsafe_impl!(NonZeroI128: AsBytes);
3241 unsafe_impl!(NonZeroUsize: AsBytes);
3242 unsafe_impl!(NonZeroIsize: AsBytes);
3243 /// SAFETY:
3244 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3245 /// closure:
    ///     - Given `t: *mut NonZeroXxx` and `r: *mut xxx`, `r` refers to an
3247 /// object of the same size as that referred to by `t`. This is true
3248 /// because `NonZeroXxx` and `xxx` have the same size. [1]
3249 /// - Since the closure takes a `&xxx` argument, given a `Ptr<'a,
3250 /// NonZeroXxx>` which satisfies the preconditions of
3251 /// `TryFromBytes::<NonZeroXxx>::is_bit_valid`, it must be guaranteed
3252 /// that the memory referenced by that `Ptr` always contains a valid
3253 /// `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1],
3254 /// `is_bit_valid`'s precondition requires that the same is true of its
3255 /// argument. Since `xxx`'s only bit validity invariant is that its
3256 /// bytes must be initialized, this memory is guaranteed to contain a
3257 /// valid `xxx`.
3258 /// - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`.
3259 /// [1]
3260 /// - The impl must only return `true` for its argument if the original
3261 /// `Ptr<NonZeroXxx>` refers to a valid `NonZeroXxx`. The only `xxx`
3262 /// which is not also a valid `NonZeroXxx` is 0. [1]
3263 ///
3264 /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html:
3265 ///
3266 /// `NonZeroU16` is guaranteed to have the same layout and bit validity as
3267 /// `u16` with the exception that `0` is not a valid instance.
3268 unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0);
3269 unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0);
3270 unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0);
3271 unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0);
3272 unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0);
3273 unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0);
3274 unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0);
3275 unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0);
3276 unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0);
3277 unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0);
3278 unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0);
3279 unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0);
3280}
3281safety_comment! {
3282 /// SAFETY:
3283 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`,
    ///   `AsBytes`: The Rust compiler reuses the `0` value to represent `None`, so
3285 /// `size_of::<Option<NonZeroXxx>>() == size_of::<xxx>()`; see
3286 /// `NonZeroXxx` documentation.
3287 /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
3288 /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
3289 /// This is worded in a way that makes it unclear whether it's meant as a
3290 /// guarantee, but given the purpose of those types, it's virtually
3291 /// unthinkable that that would ever change. The only valid alignment for
3292 /// a 1-byte type is 1.
3293 ///
3294 /// TODO(#429): Add quotes from documentation.
3295 ///
3296 /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
3297 /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
3298 ///
3299 /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
3300 /// for layout guarantees.
3301 unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3302 unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3303 assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
3304 unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3305 unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3306 unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3307 unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3308 unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3309 unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3310 unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3311 unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3312 unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3313 unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3314}
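
// For illustration (a sketch, not compiled as part of this file): because the
// all-zeroes bit pattern of `Option<NonZeroXxx>` is `None`, the impls above
// permit conversions such as:
//
// ```rust
// use core::num::NonZeroU32;
//
// let none: Option<NonZeroU32> = zerocopy::transmute!(0u32);
// assert!(none.is_none());
// ```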
3315
3316safety_comment! {
3317 /// SAFETY:
3318 /// The following types can be transmuted from `[0u8; size_of::<T>()]`. [1]
3319 /// None of them contain `UnsafeCell`s, and so they all soundly implement
3320 /// `FromZeroes`.
3321 ///
3322 /// [1] Per
3323 /// https://doc.rust-lang.org/nightly/core/option/index.html#representation:
3324 ///
3325 /// Rust guarantees to optimize the following types `T` such that
3326 /// [`Option<T>`] has the same size and alignment as `T`. In some of these
3327 /// cases, Rust further guarantees that `transmute::<_, Option<T>>([0u8;
3328 /// size_of::<T>()])` is sound and produces `Option::<T>::None`. These
3329 /// cases are identified by the second column:
3330 ///
3331 /// | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
3332 /// |-----------------------|-----------------------------------------------------------|
3333 /// | [`Box<U>`] | when `U: Sized` |
3334 /// | `&U` | when `U: Sized` |
3335 /// | `&mut U` | when `U: Sized` |
3336 /// | [`ptr::NonNull<U>`] | when `U: Sized` |
3337 /// | `fn`, `extern "C" fn` | always |
3338 ///
3339 /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite
3340 /// the Stable docs once they're available.
3341 #[cfg(feature = "alloc")]
3342 unsafe_impl!(
3343 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3344 T => FromZeroes for Option<Box<T>>
3345 );
3346 unsafe_impl!(T => FromZeroes for Option<&'_ T>);
3347 unsafe_impl!(T => FromZeroes for Option<&'_ mut T>);
3348 unsafe_impl!(T => FromZeroes for Option<NonNull<T>>);
3349 unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...));
3350 unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...));
3351}
3352
3353safety_comment! {
3354 /// SAFETY:
3355 /// Per reference [1]:
3356 /// "For all T, the following are guaranteed:
3357 /// size_of::<PhantomData<T>>() == 0
3358 /// align_of::<PhantomData<T>>() == 1".
3359 /// This gives:
3360 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
3361 /// is only one possible sequence of 0 bytes, and `PhantomData` is
3362 /// inhabited.
3363 /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding
3364 /// bytes.
3365 /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment
3366 /// 1.
3367 ///
3368 /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1
3369 unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
3370 unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
3371 unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
3372 unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
3373 unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
3374 assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
3375}
3376safety_comment! {
3377 /// SAFETY:
3378 /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout and
3379 /// bit validity as `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and
3380 /// has a single field, which is `pub`. Per the reference [2], this means
3381 /// that the `#[repr(transparent)]` attribute is "considered part of the
3382 /// public ABI".
3383 ///
3384 /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an
3385 /// `is_bit_valid` closure:
    ///     - Given `t: *mut Wrapping<T>` and `r: *mut T`, `r` refers to an
3387 /// object of the same size as that referred to by `t`. This is true
3388 /// because `Wrapping<T>` and `T` have the same layout
3389 /// - The alignment of `Wrapping<T>` is equal to the alignment of `T`.
3390 /// - The impl must only return `true` for its argument if the original
3391 /// `Ptr<Wrapping<T>>` refers to a valid `Wrapping<T>`. Since
3392 /// `Wrapping<T>` has the same bit validity as `T`, and since our impl
3393 /// just calls `T::is_bit_valid`, our impl returns `true` exactly when
3394 /// its argument contains a valid `Wrapping<T>`.
3395 /// - `FromBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
3396 /// `T: FromBytes`, then all initialized byte sequences are valid
3397 /// instances of `Wrapping<T>`. Similarly, if `T: FromBytes`, then
3398 /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes
3399 /// for Wrapping<T> where T: FromBytes` is a sound impl.
3400 /// - `AsBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
3401 /// `T: AsBytes`, then all valid instances of `Wrapping<T>` have all of
3402 /// their bytes initialized. Similarly, if `T: AsBytes`, then
3403 /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes
3404 /// for Wrapping<T> where T: AsBytes` is a valid impl.
3405 /// - `Unaligned`: Since `Wrapping<T>` has the same layout as `T`,
3406 /// `Wrapping<T>` has alignment 1 exactly when `T` does.
3407 ///
3413 /// TODO(#429): Add quotes from documentation.
3414 ///
3415 /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1):
3416 /// Reference this documentation once it's available on stable.
3417 ///
3418 /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent
3419 unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| {
3420 // SAFETY:
3421 // - Since `T` and `Wrapping<T>` have the same layout and bit validity
3422 // and contain the same fields, `T` contains `UnsafeCell`s exactly
3423 // where `Wrapping<T>` does. Thus, all memory and `UnsafeCell`
3424 // preconditions of `T::is_bit_valid` hold exactly when the same
3425 // preconditions for `Wrapping<T>::is_bit_valid` hold.
3426 // - By the same token, since `candidate` is guaranteed to have its
3427 // bytes initialized where there are always initialized bytes in
3428 // `Wrapping<T>`, the same is true for `T`.
3429 unsafe { T::is_bit_valid(candidate) }
3430 });
3431 unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
3432 unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
3433 unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
3434 unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
3435 assert_unaligned!(Wrapping<()>, Wrapping<u8>);
3436}
3437safety_comment! {
3438 // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes`
3439 // since it may contain uninitialized bytes.
3440 //
3441 /// SAFETY:
3442 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`:
3443 /// `MaybeUninit<T>` has no restrictions on its contents. Unfortunately,
3444 /// in addition to bit validity, `TryFromBytes`, `FromZeroes` and
3445 /// `FromBytes` also require that implementers contain no `UnsafeCell`s.
    ///   Thus, we require `T: Trait` in order to ensure that `T` - and thus
    ///   `MaybeUninit<T>` - contains no `UnsafeCell`s; requiring that `T`
    ///   implement each of these traits is sufficient.
3449 /// - `Unaligned`: "MaybeUninit<T> is guaranteed to have the same size,
3450 /// alignment, and ABI as T" [1]
3451 ///
3452 /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1
3453 ///
3454 /// TODO(https://github.com/google/zerocopy/issues/251): If we split
3455 /// `FromBytes` and `RefFromBytes`, or if we introduce a separate
3456 /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes`
3457 /// and `FromBytes`.
3458 unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>);
3459 unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
3460 unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
3461 unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
3462 assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
3463}
3464safety_comment! {
3465 /// SAFETY:
3466 /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and
3467 /// accessing the inner value is safe (meaning that it's unsound to leave
3468 /// the inner value uninitialized while exposing the `ManuallyDrop` to safe
3469 /// code).
3470 /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any
3471 /// valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence
3472 /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If
3473 /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid
3474 /// `ManuallyDrop<T>`.
3475 /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound
3476 /// to let safe code access a `ManuallyDrop` whose inner value is
3477 /// uninitialized, safe code can only ever access a `ManuallyDrop` whose
3478 /// contents are a valid `T`. Since `T: AsBytes`, this means that safe
3479 /// code can only ever access a `ManuallyDrop` with all initialized bytes.
3480 /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment)
3481 /// as `T`, and `T: Unaligned` guarantees that that alignment is 1.
3482 ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
3488 /// TODO(#429):
3489 /// - Add quotes from docs.
3490 /// - Once [1] (added in
3491 /// https://github.com/rust-lang/rust/pull/115522) is available on stable,
3492 /// quote the stable docs instead of the nightly docs.
3493 unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
3494 unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
3495 unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
3496 unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
3497 assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
3498}
3499safety_comment! {
3500 /// SAFETY:
3501 /// Per the reference [1]:
3502 ///
3503 /// An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
3504 /// alignment of `T`. Arrays are laid out so that the zero-based `nth`
3505 /// element of the array is offset from the start of the array by `n *
3506 /// size_of::<T>()` bytes.
3507 ///
3508 /// ...
3509 ///
3510 /// Slices have the same layout as the section of the array they slice.
3511 ///
3512 /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s
3513 /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T;
3514 /// N]` are `FromZeroes`, `FromBytes`, and `AsBytes` if `T` is
3515 /// (respectively). Furthermore, since an array/slice has "the same
3516 /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is.
3517 ///
3518 /// Note that we don't `assert_unaligned!` for slice types because
3519 /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types.
3520 ///
3521 /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
3522 unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
3523 unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
3524 unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
3525 unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
3526 assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
3527 unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
3528 unsafe_impl!(T: FromBytes => FromBytes for [T]);
3529 unsafe_impl!(T: AsBytes => AsBytes for [T]);
3530 unsafe_impl!(T: Unaligned => Unaligned for [T]);
3531}
3532safety_comment! {
3533 /// SAFETY:
3534 /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero
3535 /// pointer is considered "null". [1] No operations which require
3536 /// provenance are legal on null pointers, so this is not a footgun.
3537 ///
3538 /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers
3539 /// would be sound, but carries provenance footguns. We want to support
3540 /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are
3541 /// holding off until we can figure out how to address those footguns.
3542 ///
3543 /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the
3544 /// documentation once this PR lands.
3545 unsafe_impl!(T => FromZeroes for *const T);
3546 unsafe_impl!(T => FromZeroes for *mut T);
3547}
3548
3549// SIMD support
3550//
3551// Per the Unsafe Code Guidelines Reference [1]:
3552//
3553// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs
3554// containing `N` elements of type `T` where `N` is a power-of-two and the
3555// size and alignment requirements of `T` are equal:
3556//
3557// ```rust
3558// #[repr(simd)]
3559// struct Vector<T, N>(T_0, ..., T_(N - 1));
3560// ```
3561//
3562// ...
3563//
3564// The size of `Vector` is `N * size_of::<T>()` and its alignment is an
3565// implementation-defined function of `T` and `N` greater than or equal to
3566// `align_of::<T>()`.
3567//
3568// ...
3569//
3570// Vector elements are laid out in source field order, enabling random access
3571// to vector elements by reinterpreting the vector as an array:
3572//
3573// ```rust
3574// union U {
3575// vec: Vector<T, N>,
3576// arr: [T; N]
3577// }
3578//
3579// assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>());
3580// assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>());
3581//
3582// unsafe {
3583// let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) };
3584//
3585// assert_eq!(u.vec.0, u.arr[0]);
3586// // ...
3587// assert_eq!(u.vec.(N - 1), u.arr[N - 1]);
3588// }
3589// ```
3590//
3591// Given this background, we can observe that:
// - The size and bit pattern requirements of a SIMD type are the same as those
//   of the corresponding array type. Thus, for any SIMD type whose primitive
//   `T` is `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD
//   type is also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`
//   respectively.
3596// - Since no upper bound is placed on the alignment, no SIMD type can be
3597// guaranteed to be `Unaligned`.
3598//
3599// Also per [1]:
3600//
3601// This chapter represents the consensus from issue #38. The statements in
3602// here are not (yet) "guaranteed" not to change until an RFC ratifies them.
3603//
3604// See issue #38 [2]. While this behavior is not technically guaranteed, the
3605// likelihood that the behavior will change such that SIMD types are no longer
3606// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as
3607// that would defeat the entire purpose of SIMD types. Nonetheless, we put this
3608// behavior behind the `simd` Cargo feature, which requires consumers to opt
3609// into this stability hazard.
3610//
3611// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
3612// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
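//
// For illustration (a sketch, not compiled as part of this file): with the
// `simd` feature enabled on an x86_64 target, these impls permit reading a
// vector directly from a byte buffer:
//
// ```rust
// use core::arch::x86_64::__m128i;
// use zerocopy::FromBytes;
//
// let bytes = [0u8; 16];
// // `__m128i: FromBytes`, so the length-checked `read_from` is sound.
// let vector = __m128i::read_from(&bytes[..]).unwrap();
// ```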
3613#[cfg(feature = "simd")]
3614#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
3615mod simd {
3616 /// Defines a module which implements `TryFromBytes`, `FromZeroes`,
3617 /// `FromBytes`, and `AsBytes` for a set of types from a module in
3618 /// `core::arch`.
3619 ///
    /// `$arch` is the name of the module in `core::arch`, `$mod` is the name
    /// of the defined module, and `$typ` is the list of items from that module
    /// to implement `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes`
    /// for.
    // `allow(unused_macros)` is needed because some target/feature
    // combinations don't emit any impls and thus don't use this macro.
    #[allow(unused_macros)]
3626 macro_rules! simd_arch_mod {
3627 (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => {
3628 #[cfg $cfg]
3629 #[cfg_attr(doc_cfg, doc(cfg $cfg))]
3630 mod $mod {
3631 use core::arch::$arch::{$($typ),*};
3632
3633 use crate::*;
3634 impl_known_layout!($($typ),*);
3635 safety_comment! {
3636 /// SAFETY:
3637 /// See comment on module definition for justification.
3638 $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )*
3639 }
3640 }
3641 };
3642 }
3643
3644 #[rustfmt::skip]
3645 const _: () = {
3646 simd_arch_mod!(
3647 #[cfg(target_arch = "x86")]
3648 x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
3649 );
3650 simd_arch_mod!(
3651 #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
3652 x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
3653 );
3654 simd_arch_mod!(
3655 #[cfg(target_arch = "x86_64")]
3656 x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
3657 );
3658 simd_arch_mod!(
3659 #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
3660 x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
3661 );
3662 simd_arch_mod!(
3663 #[cfg(target_arch = "wasm32")]
3664 wasm32, wasm32, v128
3665 );
3666 simd_arch_mod!(
3667 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
3668 powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
3669 );
3670 simd_arch_mod!(
3671 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
3672 powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
3673 );
3674 simd_arch_mod!(
3675 #[cfg(target_arch = "aarch64")]
3676 aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
3677 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
3678 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
3679 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
3680 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
3681 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
3682 uint64x1_t, uint64x2_t
3683 );
3684 simd_arch_mod!(
3685 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
3686 arm, arm, int8x4_t, uint8x4_t
3687 );
3688 };
3689}
3690
3691/// Safely transmutes a value of one type to a value of another type of the same
3692/// size.
3693///
3694/// The expression `$e` must have a concrete type, `T`, which implements
3695/// `AsBytes`. The `transmute!` expression must also have a concrete type, `U`
3696/// (`U` is inferred from the calling context), and `U` must implement
3697/// `FromBytes`.
3698///
3699/// Note that the `T` produced by the expression `$e` will *not* be dropped.
3700/// Semantically, its bits will be copied into a new value of type `U`, the
3701/// original `T` will be forgotten, and the value of type `U` will be returned.
3702///
3703/// # Examples
3704///
3705/// ```
3706/// # use zerocopy::transmute;
3707/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3708///
3709/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
3710///
3711/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
3712/// ```
3713#[macro_export]
3714macro_rules! transmute {
3715 ($e:expr) => {{
3716 // NOTE: This must be a macro (rather than a function with trait bounds)
3717 // because there's no way, in a generic context, to enforce that two
3718 // types have the same size. `core::mem::transmute` uses compiler magic
3719 // to enforce this so long as the types are concrete.
3720
3721 let e = $e;
3722 if false {
3723 // This branch, though never taken, ensures that the type of `e` is
3724 // `AsBytes` and that the type of this macro invocation expression
3725 // is `FromBytes`.
3726
3727 struct AssertIsAsBytes<T: $crate::AsBytes>(T);
3728 let _ = AssertIsAsBytes(e);
3729
3730 struct AssertIsFromBytes<U: $crate::FromBytes>(U);
3731 #[allow(unused, unreachable_code)]
3732 let u = AssertIsFromBytes(loop {});
3733 u.0
3734 } else {
3735 // SAFETY: `core::mem::transmute` ensures that the type of `e` and
3736 // the type of this macro invocation expression have the same size.
3737 // We know this transmute is safe thanks to the `AsBytes` and
3738 // `FromBytes` bounds enforced by the `false` branch.
3739 //
3740 // We use this reexport of `core::mem::transmute` because we know it
3741 // will always be available for crates which are using the 2015
3742 // edition of Rust. By contrast, if we were to use
3743 // `std::mem::transmute`, this macro would not work for such crates
3744 // in `no_std` contexts, and if we were to use
3745 // `core::mem::transmute`, this macro would not work in `std`
3746 // contexts in which `core` was not manually imported. This is not a
3747 // problem for 2018 edition crates.
3748 unsafe {
3749 // Clippy: It's okay to transmute a type to itself.
3750 #[allow(clippy::useless_transmute)]
3751 $crate::macro_util::core_reexport::mem::transmute(e)
3752 }
3753 }
3754 }}
3755}
3756
3757/// Safely transmutes a mutable or immutable reference of one type to an
3758/// immutable reference of another type of the same size.
3759///
3760/// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T:
3761/// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete
3762/// type, `&U` (`U` is inferred from the calling context), where `U: Sized +
3763/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
3764///
3765/// The lifetime of the input type, `&T` or `&mut T`, must be the same as or
3766/// outlive the lifetime of the output type, `&U`.
3767///
3768/// # Examples
3769///
3770/// ```
3771/// # use zerocopy::transmute_ref;
3772/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3773///
3774/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional);
3775///
3776/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
3777/// ```
3778///
3779/// # Alignment increase error message
3780///
3781/// Because of limitations on macros, the error message generated when
3782/// `transmute_ref!` is used to transmute from a type of lower alignment to a
3783/// type of higher alignment is somewhat confusing. For example, the following
3784/// code:
3785///
3786/// ```compile_fail
3787/// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
3788/// ```
3789///
3790/// ...generates the following error:
3791///
3792/// ```text
3793/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
3794/// --> src/lib.rs:1524:34
3795/// |
3796/// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
3797/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
3798/// |
3799/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
3800/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
3801/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info)
3802/// ```
3803///
3804/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
3805/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
3806/// align_of::<U>()`.
3807#[macro_export]
3808macro_rules! transmute_ref {
3809 ($e:expr) => {{
3810 // NOTE: This must be a macro (rather than a function with trait bounds)
3811 // because there's no way, in a generic context, to enforce that two
3812 // types have the same size or alignment.
3813
3814 // Ensure that the source type is a reference or a mutable reference
3815 // (note that mutable references are implicitly reborrowed here).
3816 let e: &_ = $e;
3817
3818 #[allow(unused, clippy::diverging_sub_expression)]
3819 if false {
3820 // This branch, though never taken, ensures that the type of `e` is
3821 // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro
3822 // expression is `&U` where `U: 'u + Sized + FromBytes`, and that
3823 // `'t` outlives `'u`.
3824
3825 struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3826 let _ = AssertIsAsBytes(e);
3827
3828 struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U);
3829 #[allow(unused, unreachable_code)]
3830 let u = AssertIsFromBytes(loop {});
3831 u.0
3832 } else if false {
3833 // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >=
3835 // align_of::<U>()`.
3836
3837 // `t` is inferred to have type `T` because it's assigned to `e` (of
3838 // type `&T`) as `&t`.
3839 let mut t = unreachable!();
3840 e = &t;
3841
3842 // `u` is inferred to have type `U` because it's used as `&u` as the
3843 // value returned from this branch.
3844 let u;
3845
3846 $crate::assert_size_eq!(t, u);
3847 $crate::assert_align_gt_eq!(t, u);
3848
3849 &u
3850 } else {
3851 // SAFETY: For source type `Src` and destination type `Dst`:
3852 // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the
3853 // uses of `AssertIsAsBytes` and `AssertIsFromBytes` above.
3854 // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
3855 // the use of `assert_size_eq!` above.
3856 // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
3857 // the use of `assert_align_gt_eq!` above.
3858 unsafe { $crate::macro_util::transmute_ref(e) }
3859 }
3860 }}
3861}
3862
/// Safely transmutes a mutable reference of one type to a mutable reference of
3864/// another type of the same size.
3865///
3866/// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized +
3867/// AsBytes`. The `transmute_mut!` expression must also have a concrete type,
3868/// `&mut U` (`U` is inferred from the calling context), where `U: Sized +
3869/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
3870///
3871/// The lifetime of the input type, `&mut T`, must be the same as or outlive the
3872/// lifetime of the output type, `&mut U`.
3873///
3874/// # Examples
3875///
3876/// ```
3877/// # use zerocopy::transmute_mut;
3878/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3879///
3880/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional);
3881///
3882/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
3883///
3884/// two_dimensional.reverse();
3885///
3886/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]);
3887/// ```
3888///
3889/// # Alignment increase error message
3890///
3891/// Because of limitations on macros, the error message generated when
3892/// `transmute_mut!` is used to transmute from a type of lower alignment to a
3893/// type of higher alignment is somewhat confusing. For example, the following
3894/// code:
3895///
3896/// ```compile_fail
3897/// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
3898/// ```
3899///
3900/// ...generates the following error:
3901///
3902/// ```text
3903/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
3904/// --> src/lib.rs:1524:34
3905/// |
3906/// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
3907/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
3908/// |
3909/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
3910/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
3911/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info)
3912/// ```
3913///
3914/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
3915/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
3916/// align_of::<U>()`.
3917#[macro_export]
3918macro_rules! transmute_mut {
3919 ($e:expr) => {{
3920 // NOTE: This must be a macro (rather than a function with trait bounds)
3921 // because there's no way, in a generic context, to enforce that two
3922 // types have the same size or alignment.
3923
3924 // Ensure that the source type is a mutable reference.
3925 let e: &mut _ = $e;
3926
3927 #[allow(unused, clippy::diverging_sub_expression)]
3928 if false {
3929 // This branch, though never taken, ensures that the type of `e` is
            // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, and that the
3931 // type of this macro expression is `&mut U` where `U: 'u + Sized +
3932 // FromBytes + AsBytes`.
3933
3934 // We use immutable references here rather than mutable so that, if
3935 // this macro is used in a const context (in which, as of this
3936 // writing, mutable references are banned), the error message
3937 // appears to originate in the user's code rather than in the
3938 // internals of this macro.
3939 struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
3940 struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3941 struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
3942 struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3943
3944 if true {
3945 let _ = AssertSrcIsFromBytes(&*e);
3946 } else {
3947 let _ = AssertSrcIsAsBytes(&*e);
3948 }
3949
3950 if true {
3951 #[allow(unused, unreachable_code)]
3952 let u = AssertDstIsFromBytes(loop {});
3953 &mut *u.0
3954 } else {
3955 #[allow(unused, unreachable_code)]
3956 let u = AssertDstIsAsBytes(loop {});
3957 &mut *u.0
3958 }
3959 } else if false {
3960 // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >=
3962 // align_of::<U>()`.
3963
3964 // `t` is inferred to have type `T` because it's assigned to `e` (of
3965 // type `&mut T`) as `&mut t`.
3966 let mut t = unreachable!();
3967 e = &mut t;
3968
3969 // `u` is inferred to have type `U` because it's used as `&mut u` as
3970 // the value returned from this branch.
3971 let u;
3972
3973 $crate::assert_size_eq!(t, u);
3974 $crate::assert_align_gt_eq!(t, u);
3975
3976 &mut u
3977 } else {
3978 // SAFETY: For source type `Src` and destination type `Dst`:
3979 // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes +
3980 // AsBytes` thanks to the uses of `AssertSrcIsFromBytes`,
3981 // `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and
3982 // `AssertDstIsAsBytes` above.
3983 // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
3984 // the use of `assert_size_eq!` above.
3985 // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
3986 // the use of `assert_align_gt_eq!` above.
3987 unsafe { $crate::macro_util::transmute_mut(e) }
3988 }
3989 }}
3990}
3991
3992/// Includes a file and safely transmutes it to a value of an arbitrary type.
3993///
3994/// The file will be included as a byte array, `[u8; N]`, which will be
3995/// transmuted to another type, `T`. `T` is inferred from the calling context,
3996/// and must implement [`FromBytes`].
3997///
3998/// The file is located relative to the current file (similarly to how modules
3999/// are found). The provided path is interpreted in a platform-specific way at
4000/// compile time. So, for instance, an invocation with a Windows path containing
4001/// backslashes `\` would not compile correctly on Unix.
4002///
4003/// `include_value!` is ignorant of byte order. For byte order-aware types, see
4004/// the [`byteorder`] module.
4005///
4006/// # Examples
4007///
4008/// Assume there are two files in the same directory with the following
4009/// contents:
4010///
4011/// File `data` (no trailing newline):
4012///
4013/// ```text
4014/// abcd
4015/// ```
4016///
4017/// File `main.rs`:
4018///
4019/// ```rust
4020/// use zerocopy::include_value;
4021/// # macro_rules! include_value {
4022/// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) };
4023/// # }
4024///
4025/// fn main() {
4026/// let as_u32: u32 = include_value!("data");
4027/// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
4028/// let as_i32: i32 = include_value!("data");
4029/// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
4030/// }
4031/// ```
4032#[doc(alias("include_bytes", "include_data", "include_type"))]
4033#[macro_export]
4034macro_rules! include_value {
4035 ($file:expr $(,)?) => {
4036 $crate::transmute!(*::core::include_bytes!($file))
4037 };
4038}
4039
4040/// A typed reference derived from a byte slice.
4041///
4042/// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`.
4043/// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same
4044/// mutability as the byte slice it was constructed from (`B`).
4045///
4046/// # Examples
4047///
4048/// `Ref` can be used to treat a sequence of bytes as a structured type, and to
4049/// read and write the fields of that type as if the byte slice reference were
4050/// simply a reference to that type.
4051///
4052/// ```rust
4053/// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
4054/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned};
4055///
4056/// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
4057/// #[repr(C)]
4058/// struct UdpHeader {
4059/// src_port: [u8; 2],
4060/// dst_port: [u8; 2],
4061/// length: [u8; 2],
4062/// checksum: [u8; 2],
4063/// }
4064///
4065/// struct UdpPacket<B> {
4066/// header: Ref<B, UdpHeader>,
4067/// body: B,
4068/// }
4069///
4070/// impl<B: ByteSlice> UdpPacket<B> {
4071/// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
4072/// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?;
4073/// Some(UdpPacket { header, body })
4074/// }
4075///
4076/// pub fn get_src_port(&self) -> [u8; 2] {
4077/// self.header.src_port
4078/// }
4079/// }
4080///
4081/// impl<B: ByteSliceMut> UdpPacket<B> {
4082/// pub fn set_src_port(&mut self, src_port: [u8; 2]) {
4083/// self.header.src_port = src_port;
4084/// }
4085/// }
4086/// # }
4087/// ```
4088pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
4089
4090/// Deprecated: prefer [`Ref`] instead.
4091#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
4092#[doc(hidden)]
4093pub type LayoutVerified<B, T> = Ref<B, T>;
4094
4095impl<B, T> Ref<B, T>
4096where
4097 B: ByteSlice,
4098{
4099 /// Constructs a new `Ref`.
4100 ///
4101 /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
4102 /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of
4103 /// these checks fail, it returns `None`.
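    ///
    /// # Examples
    ///
    /// A minimal sketch; `[u8; 4]` is used as the target type because its
    /// alignment is 1, so the alignment check always succeeds:
    ///
    /// ```
    /// # use zerocopy::Ref;
    /// let bytes = &[0u8, 1, 2, 3][..];
    ///
    /// let r = Ref::<_, [u8; 4]>::new(bytes).unwrap();
    /// assert_eq!(r.read(), [0, 1, 2, 3]);
    ///
    /// // A slice of the wrong length is rejected.
    /// assert!(Ref::<_, [u8; 4]>::new(&bytes[..3]).is_none());
    /// ```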
4104 #[inline]
4105 pub fn new(bytes: B) -> Option<Ref<B, T>> {
4106 if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4107 return None;
4108 }
4109 Some(Ref(bytes, PhantomData))
4110 }
4111
4112 /// Constructs a new `Ref` from the prefix of a byte slice.
4113 ///
4114 /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
4115 /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
4116 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4117 /// the remaining bytes to the caller. If either the length or alignment
4118 /// checks fail, it returns `None`.
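    ///
    /// # Examples
    ///
    /// A minimal sketch, again using a target type with alignment 1:
    ///
    /// ```
    /// # use zerocopy::Ref;
    /// let bytes = &[0u8, 1, 2, 3, 4, 5][..];
    ///
    /// let (r, rest) = Ref::<_, [u8; 4]>::new_from_prefix(bytes).unwrap();
    /// assert_eq!(r.read(), [0, 1, 2, 3]);
    /// assert_eq!(rest, [4, 5]);
    /// ```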
4119 #[inline]
4120 pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4121 if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4122 return None;
4123 }
4124 let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
4125 Some((Ref(bytes, PhantomData), suffix))
4126 }
4127
4128 /// Constructs a new `Ref` from the suffix of a byte slice.
4129 ///
4130 /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
4131 /// the last `size_of::<T>()` bytes of `bytes` are aligned to
4132 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4133 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4134 /// caller. If either the length or alignment checks fail, it returns
4135 /// `None`.
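    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring the `new_from_prefix` example:
    ///
    /// ```
    /// # use zerocopy::Ref;
    /// let bytes = &[0u8, 1, 2, 3, 4, 5][..];
    ///
    /// let (rest, r) = Ref::<_, [u8; 4]>::new_from_suffix(bytes).unwrap();
    /// assert_eq!(rest, [0, 1]);
    /// assert_eq!(r.read(), [2, 3, 4, 5]);
    /// ```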
4136 #[inline]
4137 pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4138 let bytes_len = bytes.len();
4139 let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
4140 let (prefix, bytes) = bytes.split_at(split_at);
4141 if !util::aligned_to::<_, T>(bytes.deref()) {
4142 return None;
4143 }
4144 Some((prefix, Ref(bytes, PhantomData)))
4145 }
4146}
4147
4148impl<B, T> Ref<B, [T]>
4149where
4150 B: ByteSlice,
4151{
4152 /// Constructs a new `Ref` of a slice type.
4153 ///
4154 /// `new_slice` verifies that `bytes.len()` is a multiple of
4155 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
4156 /// constructs a new `Ref`. If either of these checks fail, it returns
4157 /// `None`.
4158 ///
4159 /// # Panics
4160 ///
4161 /// `new_slice` panics if `T` is a zero-sized type.
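    ///
    /// # Examples
    ///
    /// A minimal sketch; each `[u8; 2]` element has alignment 1:
    ///
    /// ```
    /// # use zerocopy::Ref;
    /// let bytes = &[0u8, 1, 2, 3, 4, 5][..];
    ///
    /// // Three `[u8; 2]` elements.
    /// let r = Ref::<_, [[u8; 2]]>::new_slice(bytes).unwrap();
    /// assert_eq!(r.len(), 3);
    ///
    /// // 5 bytes is not a multiple of `size_of::<[u8; 2]>()`.
    /// assert!(Ref::<_, [[u8; 2]]>::new_slice(&bytes[..5]).is_none());
    /// ```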
4162 #[inline]
4163 pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
4164 let remainder = bytes
4165 .len()
4166 .checked_rem(mem::size_of::<T>())
4167 .expect("Ref::new_slice called on a zero-sized type");
4168 if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
4169 return None;
4170 }
4171 Some(Ref(bytes, PhantomData))
4172 }
4173
4174 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice.
4175 ///
4176 /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
4177 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
4178 /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
4179 /// and returns the remaining bytes to the caller. It also ensures that
    /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4181 /// length, alignment, or overflow checks fail, it returns `None`.
4182 ///
4183 /// # Panics
4184 ///
4185 /// `new_slice_from_prefix` panics if `T` is a zero-sized type.
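    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let bytes = [0u8, 1, 2, 3, 4];
    /// let (r, rest) = Ref::<_, [[u8; 2]]>::new_slice_from_prefix(&bytes[..], 2).unwrap();
    /// assert_eq!(&*r, &[[0, 1], [2, 3]]);
    /// assert_eq!(rest, [4]);
    /// ```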
4186 #[inline]
4187 pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4188 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4189 Some(len) => len,
4190 None => return None,
4191 };
4192 if bytes.len() < expected_len {
4193 return None;
4194 }
4195 let (prefix, bytes) = bytes.split_at(expected_len);
4196 Self::new_slice(prefix).map(move |l| (l, bytes))
4197 }
4198
4199 /// Constructs a new `Ref` of a slice type from the suffix of a byte slice.
4200 ///
    /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
    /// count` and that the last `size_of::<T>() * count` bytes of `bytes` are
    /// aligned to `align_of::<T>()`. It consumes those bytes to construct a
    /// `Ref`, and returns the preceding bytes to the caller. It also ensures
    /// that `size_of::<T>() * count` does not overflow a `usize`. If any of
    /// the length, alignment, or overflow checks fail, it returns `None`.
4207 ///
4208 /// # Panics
4209 ///
4210 /// `new_slice_from_suffix` panics if `T` is a zero-sized type.
4211 #[inline]
4212 pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4213 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4214 Some(len) => len,
4215 None => return None,
4216 };
4217 let split_at = bytes.len().checked_sub(expected_len)?;
4218 let (bytes, suffix) = bytes.split_at(split_at);
4219 Self::new_slice(suffix).map(move |l| (bytes, l))
4220 }
4221}
4222
4223fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
4224 match opt {
        Some(mut r) => {
4226 r.0.fill(0);
4227 Some(r)
4228 }
4229 None => None,
4230 }
4231}
4232
4233fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4234 opt: Option<(Ref<B, T>, B)>,
4235) -> Option<(Ref<B, T>, B)> {
4236 match opt {
        Some((mut r, rest)) => {
4238 r.0.fill(0);
4239 Some((r, rest))
4240 }
4241 None => None,
4242 }
4243}
4244
4245fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4246 opt: Option<(B, Ref<B, T>)>,
4247) -> Option<(B, Ref<B, T>)> {
    map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
4249}
4250
4251impl<B, T> Ref<B, T>
4252where
4253 B: ByteSliceMut,
4254{
4255 /// Constructs a new `Ref` after zeroing the bytes.
4256 ///
4257 /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
4258 /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If
    /// either of these checks fails, it returns `None`.
4260 ///
4261 /// If the checks succeed, then `bytes` will be initialized to zero. This
4262 /// can be useful when re-using buffers to ensure that sensitive data
4263 /// previously stored in the buffer is not leaked.
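    ///
    /// # Examples
    ///
    /// A short example showing that the backing buffer is zeroed in place:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let mut bytes = [0xffu8; 4];
    /// let r = Ref::<_, [u8; 4]>::new_zeroed(&mut bytes[..]).unwrap();
    /// assert_eq!(*r, [0u8; 4]);
    /// drop(r);
    /// assert_eq!(bytes, [0u8; 4]);
    /// ```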
4264 #[inline(always)]
4265 pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
4266 map_zeroed(Self::new(bytes))
4267 }
4268
4269 /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the
4270 /// prefix.
4271 ///
4272 /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4273 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
4274 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4275 /// the remaining bytes to the caller. If either the length or alignment
    /// check fails, it returns `None`.
4277 ///
4278 /// If the checks succeed, then the prefix which is consumed will be
4279 /// initialized to zero. This can be useful when re-using buffers to ensure
4280 /// that sensitive data previously stored in the buffer is not leaked.
4281 #[inline(always)]
4282 pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4283 map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
4284 }
4285
4286 /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the
4287 /// suffix.
4288 ///
4289 /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4290 /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
4291 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4292 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
    /// caller. If either the length or alignment check fails, it returns
4294 /// `None`.
4295 ///
4296 /// If the checks succeed, then the suffix which is consumed will be
4297 /// initialized to zero. This can be useful when re-using buffers to ensure
4298 /// that sensitive data previously stored in the buffer is not leaked.
4299 #[inline(always)]
4300 pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4301 map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
4302 }
4303}
4304
4305impl<B, T> Ref<B, [T]>
4306where
4307 B: ByteSliceMut,
4308{
4309 /// Constructs a new `Ref` of a slice type after zeroing the bytes.
4310 ///
4311 /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
4312 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
    /// constructs a new `Ref`. If either of these checks fails, it returns
4314 /// `None`.
4315 ///
4316 /// If the checks succeed, then `bytes` will be initialized to zero. This
4317 /// can be useful when re-using buffers to ensure that sensitive data
4318 /// previously stored in the buffer is not leaked.
4319 ///
4320 /// # Panics
4321 ///
    /// `new_slice_zeroed` panics if `T` is a zero-sized type.
4323 #[inline(always)]
4324 pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4325 map_zeroed(Self::new_slice(bytes))
4326 }
4327
4328 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice,
4329 /// after zeroing the bytes.
4330 ///
    /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count` and that `bytes` is aligned to
    /// `align_of::<T>()`. It consumes the first `size_of::<T>() * count` bytes
    /// from `bytes` to construct a `Ref`, and returns the remaining bytes to
    /// the caller. It also ensures that `size_of::<T>() * count` does not
    /// overflow a `usize`. If any of the length, alignment, or overflow checks
    /// fail, it returns `None`.
    ///
    /// If the checks succeed, then the prefix which is consumed will be
    /// initialized to zero. This can be useful when re-using buffers to ensure
    /// that sensitive data previously stored in the buffer is not leaked.
4341 ///
4342 /// # Panics
4343 ///
4344 /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
4345 #[inline(always)]
4346 pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4347 map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
4348 }
4349
    /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
    /// after zeroing the bytes.
    ///
    /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count` and that the last `size_of::<T>() * count`
    /// bytes of `bytes` are aligned to `align_of::<T>()`. It consumes those
    /// bytes to construct a `Ref`, and returns the preceding bytes to the
    /// caller. It also ensures that `size_of::<T>() * count` does not overflow
    /// a `usize`. If any of the length, alignment, or overflow checks fail, it
    /// returns `None`.
4359 ///
4360 /// If the checks succeed, then the consumed suffix will be initialized to
4361 /// zero. This can be useful when re-using buffers to ensure that sensitive
4362 /// data previously stored in the buffer is not leaked.
4363 ///
4364 /// # Panics
4365 ///
4366 /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
4367 #[inline(always)]
4368 pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4369 map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
4370 }
4371}
4372
4373impl<B, T> Ref<B, T>
4374where
4375 B: ByteSlice,
4376 T: Unaligned,
4377{
4378 /// Constructs a new `Ref` for a type with no alignment requirement.
4379 ///
4380 /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
4381 /// constructs a new `Ref`. If the check fails, it returns `None`.
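    ///
    /// # Examples
    ///
    /// A short example; `[u8; 2]` implements [`Unaligned`], so no alignment
    /// check is needed:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2];
    /// let r = Ref::<_, [u8; 2]>::new_unaligned(&bytes[..]).unwrap();
    /// assert_eq!(*r, [1, 2]);
    /// ```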
4382 #[inline(always)]
4383 pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
4384 Ref::new(bytes)
4385 }
4386
4387 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4388 /// no alignment requirement.
4389 ///
4390 /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
4391 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4392 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4393 /// caller. If the length check fails, it returns `None`.
4394 #[inline(always)]
4395 pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4396 Ref::new_from_prefix(bytes)
4397 }
4398
4399 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4400 /// no alignment requirement.
4401 ///
4402 /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
4403 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4404 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4405 /// caller. If the length check fails, it returns `None`.
4406 #[inline(always)]
4407 pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4408 Ref::new_from_suffix(bytes)
4409 }
4410}
4411
4412impl<B, T> Ref<B, [T]>
4413where
4414 B: ByteSlice,
4415 T: Unaligned,
4416{
4417 /// Constructs a new `Ref` of a slice type with no alignment requirement.
4418 ///
4419 /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
4420 /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4421 /// returns `None`.
4422 ///
4423 /// # Panics
4424 ///
    /// `new_slice_unaligned` panics if `T` is a zero-sized type.
4426 #[inline(always)]
4427 pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
4428 Ref::new_slice(bytes)
4429 }
4430
4431 /// Constructs a new `Ref` of a slice type with no alignment requirement
4432 /// from the prefix of a byte slice.
4433 ///
    /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the remaining
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4440 ///
4441 /// # Panics
4442 ///
4443 /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
4444 #[inline(always)]
4445 pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4446 Ref::new_slice_from_prefix(bytes, count)
4447 }
4448
4449 /// Constructs a new `Ref` of a slice type with no alignment requirement
4450 /// from the suffix of a byte slice.
4451 ///
    /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the preceding
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4457 ///
4458 /// # Panics
4459 ///
4460 /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
4461 #[inline(always)]
4462 pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4463 Ref::new_slice_from_suffix(bytes, count)
4464 }
4465}
4466
4467impl<B, T> Ref<B, T>
4468where
4469 B: ByteSliceMut,
4470 T: Unaligned,
4471{
4472 /// Constructs a new `Ref` for a type with no alignment requirement, zeroing
4473 /// the bytes.
4474 ///
4475 /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
4476 /// constructs a new `Ref`. If the check fails, it returns `None`.
4477 ///
4478 /// If the check succeeds, then `bytes` will be initialized to zero. This
4479 /// can be useful when re-using buffers to ensure that sensitive data
4480 /// previously stored in the buffer is not leaked.
4481 #[inline(always)]
4482 pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
4483 map_zeroed(Self::new_unaligned(bytes))
4484 }
4485
4486 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4487 /// no alignment requirement, zeroing the prefix.
4488 ///
4489 /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
4490 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4491 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4492 /// caller. If the length check fails, it returns `None`.
4493 ///
4494 /// If the check succeeds, then the prefix which is consumed will be
4495 /// initialized to zero. This can be useful when re-using buffers to ensure
4496 /// that sensitive data previously stored in the buffer is not leaked.
4497 #[inline(always)]
4498 pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4499 map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
4500 }
4501
4502 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4503 /// no alignment requirement, zeroing the suffix.
4504 ///
4505 /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
4506 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4507 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4508 /// caller. If the length check fails, it returns `None`.
4509 ///
4510 /// If the check succeeds, then the suffix which is consumed will be
4511 /// initialized to zero. This can be useful when re-using buffers to ensure
4512 /// that sensitive data previously stored in the buffer is not leaked.
4513 #[inline(always)]
4514 pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4515 map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
4516 }
4517}
4518
4519impl<B, T> Ref<B, [T]>
4520where
4521 B: ByteSliceMut,
4522 T: Unaligned,
4523{
4524 /// Constructs a new `Ref` for a slice type with no alignment requirement,
4525 /// zeroing the bytes.
4526 ///
4527 /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
4528 /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4529 /// returns `None`.
4530 ///
4531 /// If the check succeeds, then `bytes` will be initialized to zero. This
4532 /// can be useful when re-using buffers to ensure that sensitive data
4533 /// previously stored in the buffer is not leaked.
4534 ///
4535 /// # Panics
4536 ///
    /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
4538 #[inline(always)]
4539 pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4540 map_zeroed(Self::new_slice_unaligned(bytes))
4541 }
4542
4543 /// Constructs a new `Ref` of a slice type with no alignment requirement
4544 /// from the prefix of a byte slice, after zeroing the bytes.
4545 ///
    /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the remaining
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4552 ///
4553 /// If the checks succeed, then the prefix will be initialized to zero. This
4554 /// can be useful when re-using buffers to ensure that sensitive data
4555 /// previously stored in the buffer is not leaked.
4556 ///
4557 /// # Panics
4558 ///
4559 /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
4560 /// type.
4561 #[inline(always)]
4562 pub fn new_slice_unaligned_from_prefix_zeroed(
4563 bytes: B,
4564 count: usize,
4565 ) -> Option<(Ref<B, [T]>, B)> {
4566 map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
4567 }
4568
4569 /// Constructs a new `Ref` of a slice type with no alignment requirement
4570 /// from the suffix of a byte slice, after zeroing the bytes.
4571 ///
    /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the preceding
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4577 ///
4578 /// If the checks succeed, then the suffix will be initialized to zero. This
4579 /// can be useful when re-using buffers to ensure that sensitive data
4580 /// previously stored in the buffer is not leaked.
4581 ///
4582 /// # Panics
4583 ///
4584 /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
4585 /// type.
4586 #[inline(always)]
4587 pub fn new_slice_unaligned_from_suffix_zeroed(
4588 bytes: B,
4589 count: usize,
4590 ) -> Option<(B, Ref<B, [T]>)> {
4591 map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
4592 }
4593}
4594
4595impl<'a, B, T> Ref<B, T>
4596where
4597 B: 'a + ByteSlice,
4598 T: FromBytes,
4599{
4600 /// Converts this `Ref` into a reference.
4601 ///
4602 /// `into_ref` consumes the `Ref`, and returns a reference to `T`.
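    ///
    /// # Examples
    ///
    /// A short example: the returned reference borrows from the underlying
    /// byte slice rather than from the (consumed) `Ref`, so it may outlive the
    /// `Ref` itself:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4];
    /// let four: &[u8; 4] = Ref::<_, [u8; 4]>::new(&bytes[..]).unwrap().into_ref();
    /// assert_eq!(four, &[1, 2, 3, 4]);
    /// ```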
4603 #[inline(always)]
4604 pub fn into_ref(self) -> &'a T {
4605 // SAFETY: This is sound because `B` is guaranteed to live for the
4606 // lifetime `'a`, meaning that a) the returned reference cannot outlive
4607 // the `B` from which `self` was constructed and, b) no mutable methods
4608 // on that `B` can be called during the lifetime of the returned
4609 // reference. See the documentation on `deref_helper` for what
4610 // invariants we are required to uphold.
4611 unsafe { self.deref_helper() }
4612 }
4613}
4614
4615impl<'a, B, T> Ref<B, T>
4616where
4617 B: 'a + ByteSliceMut,
4618 T: FromBytes + AsBytes,
4619{
4620 /// Converts this `Ref` into a mutable reference.
4621 ///
4622 /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`.
4623 #[inline(always)]
4624 pub fn into_mut(mut self) -> &'a mut T {
4625 // SAFETY: This is sound because `B` is guaranteed to live for the
4626 // lifetime `'a`, meaning that a) the returned reference cannot outlive
4627 // the `B` from which `self` was constructed and, b) no other methods -
4628 // mutable or immutable - on that `B` can be called during the lifetime
4629 // of the returned reference. See the documentation on
4630 // `deref_mut_helper` for what invariants we are required to uphold.
4631 unsafe { self.deref_mut_helper() }
4632 }
4633}
4634
4635impl<'a, B, T> Ref<B, [T]>
4636where
4637 B: 'a + ByteSlice,
4638 T: FromBytes,
4639{
4640 /// Converts this `Ref` into a slice reference.
4641 ///
4642 /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`.
4643 #[inline(always)]
4644 pub fn into_slice(self) -> &'a [T] {
4645 // SAFETY: This is sound because `B` is guaranteed to live for the
4646 // lifetime `'a`, meaning that a) the returned reference cannot outlive
4647 // the `B` from which `self` was constructed and, b) no mutable methods
4648 // on that `B` can be called during the lifetime of the returned
4649 // reference. See the documentation on `deref_slice_helper` for what
4650 // invariants we are required to uphold.
4651 unsafe { self.deref_slice_helper() }
4652 }
4653}
4654
4655impl<'a, B, T> Ref<B, [T]>
4656where
4657 B: 'a + ByteSliceMut,
4658 T: FromBytes + AsBytes,
4659{
4660 /// Converts this `Ref` into a mutable slice reference.
4661 ///
4662 /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to
4663 /// `[T]`.
4664 #[inline(always)]
4665 pub fn into_mut_slice(mut self) -> &'a mut [T] {
4666 // SAFETY: This is sound because `B` is guaranteed to live for the
4667 // lifetime `'a`, meaning that a) the returned reference cannot outlive
4668 // the `B` from which `self` was constructed and, b) no other methods -
4669 // mutable or immutable - on that `B` can be called during the lifetime
4670 // of the returned reference. See the documentation on
4671 // `deref_mut_slice_helper` for what invariants we are required to
4672 // uphold.
4673 unsafe { self.deref_mut_slice_helper() }
4674 }
4675}
4676
4677impl<B, T> Ref<B, T>
4678where
4679 B: ByteSlice,
4680 T: FromBytes,
4681{
4682 /// Creates an immutable reference to `T` with a specific lifetime.
4683 ///
4684 /// # Safety
4685 ///
4686 /// The type bounds on this method guarantee that it is safe to create an
4687 /// immutable reference to `T` from `self`. However, since the lifetime `'a`
4688 /// is not required to be shorter than the lifetime of the reference to
4689 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4690 /// this reference. In particular, the referent must exist for all of `'a`,
4691 /// and no mutable references to the same memory may be constructed during
4692 /// `'a`.
4693 unsafe fn deref_helper<'a>(&self) -> &'a T {
4694 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4695 #[allow(clippy::undocumented_unsafe_blocks)]
4696 unsafe {
4697 &*self.0.as_ptr().cast::<T>()
4698 }
4699 }
4700}
4701
4702impl<B, T> Ref<B, T>
4703where
4704 B: ByteSliceMut,
4705 T: FromBytes + AsBytes,
4706{
4707 /// Creates a mutable reference to `T` with a specific lifetime.
4708 ///
4709 /// # Safety
4710 ///
4711 /// The type bounds on this method guarantee that it is safe to create a
4712 /// mutable reference to `T` from `self`. However, since the lifetime `'a`
4713 /// is not required to be shorter than the lifetime of the reference to
4714 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4715 /// this reference. In particular, the referent must exist for all of `'a`,
4716 /// and no other references - mutable or immutable - to the same memory may
4717 /// be constructed during `'a`.
4718 unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
4719 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4720 #[allow(clippy::undocumented_unsafe_blocks)]
4721 unsafe {
4722 &mut *self.0.as_mut_ptr().cast::<T>()
4723 }
4724 }
4725}
4726
4727impl<B, T> Ref<B, [T]>
4728where
4729 B: ByteSlice,
4730 T: FromBytes,
4731{
4732 /// Creates an immutable reference to `[T]` with a specific lifetime.
4733 ///
4734 /// # Safety
4735 ///
4736 /// `deref_slice_helper` has the same safety requirements as `deref_helper`.
4737 unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
        let len = self.0.len();
        let elem_size = mem::size_of::<T>();
        debug_assert_ne!(elem_size, 0);
        // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
        // Thus, neither the mod nor division operations here can panic.
        #[allow(clippy::arithmetic_side_effects)]
        let elems = {
4745 debug_assert_eq!(len % elem_size, 0);
4746 len / elem_size
4747 };
4748 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4749 #[allow(clippy::undocumented_unsafe_blocks)]
4750 unsafe {
            slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
4752 }
4753 }
4754}
4755
4756impl<B, T> Ref<B, [T]>
4757where
4758 B: ByteSliceMut,
4759 T: FromBytes + AsBytes,
4760{
4761 /// Creates a mutable reference to `[T]` with a specific lifetime.
4762 ///
4763 /// # Safety
4764 ///
4765 /// `deref_mut_slice_helper` has the same safety requirements as
4766 /// `deref_mut_helper`.
4767 unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
4768 let len = self.0.len();
4769 let elem_size = mem::size_of::<T>();
4770 debug_assert_ne!(elem_size, 0);
4771 // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
4772 // Thus, neither the mod nor division operations here can panic.
4773 #[allow(clippy::arithmetic_side_effects)]
4774 let elems = {
4775 debug_assert_eq!(len % elem_size, 0);
4776 len / elem_size
4777 };
4778 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4779 #[allow(clippy::undocumented_unsafe_blocks)]
4780 unsafe {
4781 slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
4782 }
4783 }
4784}
4785
4786impl<B, T> Ref<B, T>
4787where
4788 B: ByteSlice,
4789 T: ?Sized,
4790{
4791 /// Gets the underlying bytes.
4792 #[inline]
4793 pub fn bytes(&self) -> &[u8] {
4794 &self.0
4795 }
4796}
4797
4798impl<B, T> Ref<B, T>
4799where
4800 B: ByteSliceMut,
4801 T: ?Sized,
4802{
4803 /// Gets the underlying bytes mutably.
4804 #[inline]
4805 pub fn bytes_mut(&mut self) -> &mut [u8] {
4806 &mut self.0
4807 }
4808}
4809
4810impl<B, T> Ref<B, T>
4811where
4812 B: ByteSlice,
4813 T: FromBytes,
4814{
4815 /// Reads a copy of `T`.
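    ///
    /// # Examples
    ///
    /// For example, `read` returns a copy and leaves the backing bytes
    /// untouched:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2];
    /// let r = Ref::<_, [u8; 2]>::new(&bytes[..]).unwrap();
    /// let copy: [u8; 2] = r.read();
    /// assert_eq!(copy, [1, 2]);
    /// ```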
4816 #[inline]
4817 pub fn read(&self) -> T {
4818 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4819 // at least `size_of::<T>()` bytes long, and that it is at least as
4820 // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to
4821 // interpret these bytes as a `T`.
4822 unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
4823 }
4824}
4825
4826impl<B, T> Ref<B, T>
4827where
4828 B: ByteSliceMut,
4829 T: AsBytes,
4830{
4831 /// Writes the bytes of `t` and then forgets `t`.
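    ///
    /// # Examples
    ///
    /// For example, `write` overwrites the backing bytes with the bytes of
    /// `t`:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let mut bytes = [0u8; 2];
    /// let mut r = Ref::<_, [u8; 2]>::new(&mut bytes[..]).unwrap();
    /// r.write([1, 2]);
    /// drop(r);
    /// assert_eq!(bytes, [1, 2]);
    /// ```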
4832 #[inline]
4833 pub fn write(&mut self, t: T) {
4834 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4835 // at least `size_of::<T>()` bytes long, and that it is at least as
4836 // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow
4837 // all of the bytes of `t` to be accessed as a `[u8]`, but because `T:
4838 // AsBytes`, we know this is sound.
        unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
4840 }
4841}
4842
4843impl<B, T> Deref for Ref<B, T>
4844where
4845 B: ByteSlice,
4846 T: FromBytes,
4847{
4848 type Target = T;
4849 #[inline]
4850 fn deref(&self) -> &T {
4851 // SAFETY: This is sound because the lifetime of `self` is the same as
4852 // the lifetime of the return value, meaning that a) the returned
4853 // reference cannot outlive `self` and, b) no mutable methods on `self`
4854 // can be called during the lifetime of the returned reference. See the
4855 // documentation on `deref_helper` for what invariants we are required
4856 // to uphold.
4857 unsafe { self.deref_helper() }
4858 }
4859}
4860
4861impl<B, T> DerefMut for Ref<B, T>
4862where
4863 B: ByteSliceMut,
4864 T: FromBytes + AsBytes,
4865{
4866 #[inline]
4867 fn deref_mut(&mut self) -> &mut T {
4868 // SAFETY: This is sound because the lifetime of `self` is the same as
4869 // the lifetime of the return value, meaning that a) the returned
4870 // reference cannot outlive `self` and, b) no other methods on `self`
4871 // can be called during the lifetime of the returned reference. See the
4872 // documentation on `deref_mut_helper` for what invariants we are
4873 // required to uphold.
4874 unsafe { self.deref_mut_helper() }
4875 }
4876}
4877
4878impl<B, T> Deref for Ref<B, [T]>
4879where
4880 B: ByteSlice,
4881 T: FromBytes,
4882{
4883 type Target = [T];
4884 #[inline]
4885 fn deref(&self) -> &[T] {
4886 // SAFETY: This is sound because the lifetime of `self` is the same as
4887 // the lifetime of the return value, meaning that a) the returned
4888 // reference cannot outlive `self` and, b) no mutable methods on `self`
4889 // can be called during the lifetime of the returned reference. See the
4890 // documentation on `deref_slice_helper` for what invariants we are
4891 // required to uphold.
4892 unsafe { self.deref_slice_helper() }
4893 }
4894}
4895
4896impl<B, T> DerefMut for Ref<B, [T]>
4897where
4898 B: ByteSliceMut,
4899 T: FromBytes + AsBytes,
4900{
4901 #[inline]
4902 fn deref_mut(&mut self) -> &mut [T] {
4903 // SAFETY: This is sound because the lifetime of `self` is the same as
4904 // the lifetime of the return value, meaning that a) the returned
4905 // reference cannot outlive `self` and, b) no other methods on `self`
4906 // can be called during the lifetime of the returned reference. See the
4907 // documentation on `deref_mut_slice_helper` for what invariants we are
4908 // required to uphold.
4909 unsafe { self.deref_mut_slice_helper() }
4910 }
4911}
4912
4913impl<T, B> Display for Ref<B, T>
4914where
4915 B: ByteSlice,
4916 T: FromBytes + Display,
4917{
4918 #[inline]
4919 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4920 let inner: &T = self;
4921 inner.fmt(fmt)
4922 }
4923}
4924
4925impl<T, B> Display for Ref<B, [T]>
4926where
4927 B: ByteSlice,
4928 T: FromBytes,
4929 [T]: Display,
4930{
4931 #[inline]
4932 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4933 let inner: &[T] = self;
4934 inner.fmt(fmt)
4935 }
4936}
4937
4938impl<T, B> Debug for Ref<B, T>
4939where
4940 B: ByteSlice,
4941 T: FromBytes + Debug,
4942{
4943 #[inline]
4944 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4945 let inner: &T = self;
        fmt.debug_tuple("Ref").field(&inner).finish()
4947 }
4948}
4949
4950impl<T, B> Debug for Ref<B, [T]>
4951where
4952 B: ByteSlice,
4953 T: FromBytes + Debug,
4954{
4955 #[inline]
4956 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4957 let inner: &[T] = self;
        fmt.debug_tuple("Ref").field(&inner).finish()
4959 }
4960}
4961
4962impl<T, B> Eq for Ref<B, T>
4963where
4964 B: ByteSlice,
4965 T: FromBytes + Eq,
4966{
4967}
4968
4969impl<T, B> Eq for Ref<B, [T]>
4970where
4971 B: ByteSlice,
4972 T: FromBytes + Eq,
4973{
4974}
4975
4976impl<T, B> PartialEq for Ref<B, T>
4977where
4978 B: ByteSlice,
4979 T: FromBytes + PartialEq,
4980{
4981 #[inline]
4982 fn eq(&self, other: &Self) -> bool {
4983 self.deref().eq(other.deref())
4984 }
4985}
4986
4987impl<T, B> PartialEq for Ref<B, [T]>
4988where
4989 B: ByteSlice,
4990 T: FromBytes + PartialEq,
4991{
4992 #[inline]
4993 fn eq(&self, other: &Self) -> bool {
4994 self.deref().eq(other.deref())
4995 }
4996}
4997
4998impl<T, B> Ord for Ref<B, T>
4999where
5000 B: ByteSlice,
5001 T: FromBytes + Ord,
5002{
5003 #[inline]
5004 fn cmp(&self, other: &Self) -> Ordering {
5005 let inner: &T = self;
5006 let other_inner: &T = other;
5007 inner.cmp(other_inner)
5008 }
5009}
5010
5011impl<T, B> Ord for Ref<B, [T]>
5012where
5013 B: ByteSlice,
5014 T: FromBytes + Ord,
5015{
5016 #[inline]
5017 fn cmp(&self, other: &Self) -> Ordering {
5018 let inner: &[T] = self;
5019 let other_inner: &[T] = other;
5020 inner.cmp(other_inner)
5021 }
5022}
5023
5024impl<T, B> PartialOrd for Ref<B, T>
5025where
5026 B: ByteSlice,
5027 T: FromBytes + PartialOrd,
5028{
5029 #[inline]
5030 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5031 let inner: &T = self;
5032 let other_inner: &T = other;
5033 inner.partial_cmp(other_inner)
5034 }
5035}
5036
5037impl<T, B> PartialOrd for Ref<B, [T]>
5038where
5039 B: ByteSlice,
5040 T: FromBytes + PartialOrd,
5041{
5042 #[inline]
5043 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5044 let inner: &[T] = self;
5045 let other_inner: &[T] = other;
5046 inner.partial_cmp(other_inner)
5047 }
5048}
5049
5050mod sealed {
5051 pub trait ByteSliceSealed {}
5052}
5053
5054// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
5055// Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
// references such as that a given reference will never change its length
5057// between calls to deref() or deref_mut(), and that split_at() works as
5058// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
5059// implement them in a way that violated these behaviors, and would break our
// unsafe code. Thus, we seal them and implement them only for known-good
5061// reference types. For the same reason, they're unsafe traits.
5062
5063#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5064/// A mutable or immutable reference to a byte slice.
5065///
5066/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
5067/// implemented for various special reference types such as `Ref<[u8]>` and
5068/// `RefMut<[u8]>`.
5069///
5070/// Note that, while it would be technically possible, `ByteSlice` is not
5071/// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`]
5072/// method would involve reallocation, and `split_at` must be a very cheap
5073/// operation in order for the utilities in this crate to perform as designed.
5074///
5075/// [`split_at`]: crate::ByteSlice::split_at
5076// It may seem overkill to go to this length to ensure that this doc link never
5077// breaks. We do this because it simplifies CI - it means that generating docs
5078// always succeeds, so we don't need special logic to only generate docs under
5079// certain features.
5080#[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
5081#[cfg_attr(
5082 not(feature = "alloc"),
5083 doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
5084)]
5085pub unsafe trait ByteSlice:
5086 Deref<Target = [u8]> + Sized + self::sealed::ByteSliceSealed
5087{
5088 /// Gets a raw pointer to the first byte in the slice.
5089 #[inline]
5090 fn as_ptr(&self) -> *const u8 {
5091 <[u8]>::as_ptr(self)
5092 }
5093
5094 /// Splits the slice at the midpoint.
5095 ///
5096 /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`.
5097 ///
5098 /// # Panics
5099 ///
5100 /// `x.split_at(mid)` panics if `mid > x.len()`.
5101 fn split_at(self, mid: usize) -> (Self, Self);
5102}
5103
5104#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5105/// A mutable reference to a byte slice.
5106///
5107/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
5108/// a byte slice, and is implemented for various special reference types such as
5109/// `RefMut<[u8]>`.
5110pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
5111 /// Gets a mutable raw pointer to the first byte in the slice.
5112 #[inline]
5113 fn as_mut_ptr(&mut self) -> *mut u8 {
5114 <[u8]>::as_mut_ptr(self)
5115 }
5116}
5117
5118impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
5119// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5120#[allow(clippy::undocumented_unsafe_blocks)]
5121unsafe impl<'a> ByteSlice for &'a [u8] {
5122 #[inline]
5123 fn split_at(self, mid: usize) -> (Self, Self) {
5124 <[u8]>::split_at(self, mid)
5125 }
5126}
5127
5128impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
5129// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5130#[allow(clippy::undocumented_unsafe_blocks)]
5131unsafe impl<'a> ByteSlice for &'a mut [u8] {
5132 #[inline]
5133 fn split_at(self, mid: usize) -> (Self, Self) {
5134 <[u8]>::split_at_mut(self, mid)
5135 }
5136}
5137
5138impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
5139// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5140#[allow(clippy::undocumented_unsafe_blocks)]
5141unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
5142 #[inline]
5143 fn split_at(self, mid: usize) -> (Self, Self) {
        cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
5145 }
5146}
5147
5148impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
5149// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5150#[allow(clippy::undocumented_unsafe_blocks)]
5151unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
5152 #[inline]
5153 fn split_at(self, mid: usize) -> (Self, Self) {
        RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
5155 }
5156}
5157
5158// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5159#[allow(clippy::undocumented_unsafe_blocks)]
5160unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
5161
5162// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5163#[allow(clippy::undocumented_unsafe_blocks)]
5164unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
5165
5166#[cfg(feature = "alloc")]
5167#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5168mod alloc_support {
5169 use alloc::vec::Vec;
5170
5171 use super::*;
5172
5173 /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5174 /// vector. The new items are initialized with zeroes.
5175 ///
5176 /// # Panics
5177 ///
5178 /// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
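    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use zerocopy::extend_vec_zeroed;
    ///
    /// let mut v = vec![1u8, 2, 3];
    /// extend_vec_zeroed(&mut v, 2);
    /// assert_eq!(&*v, &[1, 2, 3, 0, 0]);
    /// ```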
5179 #[inline(always)]
5180 pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
5181 insert_vec_zeroed(v, v.len(), additional);
5182 }
5183
5184 /// Inserts `additional` new items into `Vec<T>` at `position`.
5185 /// The new items are initialized with zeroes.
5186 ///
5187 /// # Panics
5188 ///
5189 /// * Panics if `position > v.len()`.
5190 /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
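    ///
    /// # Examples
    ///
    /// For example:
    ///
    /// ```
    /// use zerocopy::insert_vec_zeroed;
    ///
    /// let mut v = vec![1u8, 2, 3];
    /// insert_vec_zeroed(&mut v, 1, 2);
    /// assert_eq!(&*v, &[1, 0, 0, 2, 3]);
    /// ```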
5191 #[inline]
5192 pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
5193 assert!(position <= v.len());
5194 v.reserve(additional);
5195 // SAFETY: The `reserve` call guarantees that these cannot overflow:
5196 // * `ptr.add(position)`
5197 // * `position + additional`
5198 // * `v.len() + additional`
5199 //
5200 // `v.len() - position` cannot overflow because we asserted that
5201 // `position <= v.len()`.
5202 unsafe {
5203 // This is a potentially overlapping copy.
5204 let ptr = v.as_mut_ptr();
5205 #[allow(clippy::arithmetic_side_effects)]
5206 ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
5207 ptr.add(position).write_bytes(0, additional);
5208 #[allow(clippy::arithmetic_side_effects)]
5209 v.set_len(v.len() + additional);
5210 }
5211 }
5212
5213 #[cfg(test)]
5214 mod tests {
5215 use core::convert::TryFrom as _;
5216
5217 use super::*;
5218
5219 #[test]
5220 fn test_extend_vec_zeroed() {
5221 // Test extending when there is an existing allocation.
5222 let mut v = vec![100u64, 200, 300];
5223 extend_vec_zeroed(&mut v, 3);
5224 assert_eq!(v.len(), 6);
5225 assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
5226 drop(v);
5227
5228 // Test extending when there is no existing allocation.
5229 let mut v: Vec<u64> = Vec::new();
5230 extend_vec_zeroed(&mut v, 3);
5231 assert_eq!(v.len(), 3);
5232 assert_eq!(&*v, &[0, 0, 0]);
5233 drop(v);
5234 }
5235
5236 #[test]
5237 fn test_extend_vec_zeroed_zst() {
5238 // Test extending when there is an existing (fake) allocation.
5239 let mut v = vec![(), (), ()];
5240 extend_vec_zeroed(&mut v, 3);
5241 assert_eq!(v.len(), 6);
5242 assert_eq!(&*v, &[(), (), (), (), (), ()]);
5243 drop(v);
5244
5245 // Test extending when there is no existing (fake) allocation.
5246 let mut v: Vec<()> = Vec::new();
5247 extend_vec_zeroed(&mut v, 3);
5248 assert_eq!(&*v, &[(), (), ()]);
5249 drop(v);
5250 }
5251
5252 #[test]
5253 fn test_insert_vec_zeroed() {
5254 // Insert at start (no existing allocation).
5255 let mut v: Vec<u64> = Vec::new();
5256 insert_vec_zeroed(&mut v, 0, 2);
5257 assert_eq!(v.len(), 2);
5258 assert_eq!(&*v, &[0, 0]);
5259 drop(v);
5260
5261 // Insert at start.
5262 let mut v = vec![100u64, 200, 300];
5263 insert_vec_zeroed(&mut v, 0, 2);
5264 assert_eq!(v.len(), 5);
5265 assert_eq!(&*v, &[0, 0, 100, 200, 300]);
5266 drop(v);
5267
5268 // Insert at middle.
5269 let mut v = vec![100u64, 200, 300];
5270 insert_vec_zeroed(&mut v, 1, 1);
5271 assert_eq!(v.len(), 4);
5272 assert_eq!(&*v, &[100, 0, 200, 300]);
5273 drop(v);
5274
5275 // Insert at end.
5276 let mut v = vec![100u64, 200, 300];
5277 insert_vec_zeroed(&mut v, 3, 1);
5278 assert_eq!(v.len(), 4);
5279 assert_eq!(&*v, &[100, 200, 300, 0]);
5280 drop(v);
5281 }
5282
5283 #[test]
5284 fn test_insert_vec_zeroed_zst() {
5285 // Insert at start (no existing fake allocation).
5286 let mut v: Vec<()> = Vec::new();
5287 insert_vec_zeroed(&mut v, 0, 2);
5288 assert_eq!(v.len(), 2);
5289 assert_eq!(&*v, &[(), ()]);
5290 drop(v);
5291
5292 // Insert at start.
5293 let mut v = vec![(), (), ()];
5294 insert_vec_zeroed(&mut v, 0, 2);
5295 assert_eq!(v.len(), 5);
5296 assert_eq!(&*v, &[(), (), (), (), ()]);
5297 drop(v);
5298
5299 // Insert at middle.
5300 let mut v = vec![(), (), ()];
5301 insert_vec_zeroed(&mut v, 1, 1);
5302 assert_eq!(v.len(), 4);
5303 assert_eq!(&*v, &[(), (), (), ()]);
5304 drop(v);
5305
5306 // Insert at end.
5307 let mut v = vec![(), (), ()];
5308 insert_vec_zeroed(&mut v, 3, 1);
5309 assert_eq!(v.len(), 4);
5310 assert_eq!(&*v, &[(), (), (), ()]);
5311 drop(v);
5312 }
5313
5314 #[test]
5315 fn test_new_box_zeroed() {
5316 assert_eq!(*u64::new_box_zeroed(), 0);
5317 }
5318
5319 #[test]
5320 fn test_new_box_zeroed_array() {
5321 drop(<[u32; 0x1000]>::new_box_zeroed());
5322 }
5323
5324 #[test]
5325 fn test_new_box_zeroed_zst() {
5326 // This test exists in order to exercise unsafe code, especially
5327 // when running under Miri.
5328 #[allow(clippy::unit_cmp)]
5329 {
5330 assert_eq!(*<()>::new_box_zeroed(), ());
5331 }
5332 }
5333
5334 #[test]
5335 fn test_new_box_slice_zeroed() {
5336 let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
5337 assert_eq!(s.len(), 3);
5338 assert_eq!(&*s, &[0, 0, 0]);
5339 s[1] = 3;
5340 assert_eq!(&*s, &[0, 3, 0]);
5341 }
5342
5343 #[test]
5344 fn test_new_box_slice_zeroed_empty() {
5345 let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
5346 assert_eq!(s.len(), 0);
5347 }
5348
5349 #[test]
5350 fn test_new_box_slice_zeroed_zst() {
5351 let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
5352 assert_eq!(s.len(), 3);
5353 assert!(s.get(10).is_none());
5354 // This test exists in order to exercise unsafe code, especially
5355 // when running under Miri.
5356 #[allow(clippy::unit_cmp)]
5357 {
5358 assert_eq!(s[1], ());
5359 }
5360 s[2] = ();
5361 }
5362
5363 #[test]
5364 fn test_new_box_slice_zeroed_zst_empty() {
5365 let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
5366 assert_eq!(s.len(), 0);
5367 }
5368
5369 #[test]
5370 #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
5371 fn test_new_box_slice_zeroed_panics_mul_overflow() {
5372 let _ = u16::new_box_slice_zeroed(usize::MAX);
5373 }
5374
5375 #[test]
5376 #[should_panic(expected = "assertion failed: size <= max_alloc")]
5377 fn test_new_box_slice_zeroed_panics_isize_overflow() {
5378 let max = usize::try_from(isize::MAX).unwrap();
5379 let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
5380 }
5381 }
5382}
5383
5384#[cfg(feature = "alloc")]
5385#[doc(inline)]
5386pub use alloc_support::*;
5387
5388#[cfg(test)]
5389mod tests {
5390 #![allow(clippy::unreadable_literal)]
5391
5392 use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref};
5393
5394 use static_assertions::assert_impl_all;
5395
5396 use super::*;
5397 use crate::util::testutil::*;
5398
5399 // An unsized type.
5400 //
5401 // This is used to test the custom derives of our traits. The `[u8]` type
5402 // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5403 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
5404 #[repr(transparent)]
5405 struct Unsized([u8]);
5406
5407 impl Unsized {
5408 fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
5410 // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5411 // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5412 // guaranteed by the language spec, we can just change this since
5413 // it's in test code.
5414 //
5415 // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5416 unsafe { mem::transmute(slc) }
5417 }
5418 }
5419
5420 /// Tests of when a sized `DstLayout` is extended with a sized field.
5421 #[allow(clippy::decimal_literal_representation)]
5422 #[test]
5423 fn test_dst_layout_extend_sized_with_sized() {
5424 // This macro constructs a layout corresponding to a `u8` and extends it
5425 // with a zero-sized trailing field of given alignment `n`. The macro
5426 // tests that the resulting layout has both size and alignment `min(n,
5427 // P)` for all valid values of `repr(packed(P))`.
5428 macro_rules! test_align_is_size {
5429 ($n:expr) => {
5430 let base = DstLayout::for_type::<u8>();
5431 let trailing_field = DstLayout::for_type::<elain::Align<$n>>();
5432
5433 let packs =
5434 core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p))));
5435
5436 for pack in packs {
5437 let composite = base.extend(trailing_field, pack);
5438 let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN);
5439 let align = $n.min(max_align.get());
5440 assert_eq!(
5441 composite,
5442 DstLayout {
5443 align: NonZeroUsize::new(align).unwrap(),
5444 size_info: SizeInfo::Sized { _size: align }
5445 }
5446 )
5447 }
5448 };
5449 }
5450
5451 test_align_is_size!(1);
5452 test_align_is_size!(2);
5453 test_align_is_size!(4);
5454 test_align_is_size!(8);
5455 test_align_is_size!(16);
5456 test_align_is_size!(32);
5457 test_align_is_size!(64);
5458 test_align_is_size!(128);
5459 test_align_is_size!(256);
5460 test_align_is_size!(512);
5461 test_align_is_size!(1024);
5462 test_align_is_size!(2048);
5463 test_align_is_size!(4096);
5464 test_align_is_size!(8192);
5465 test_align_is_size!(16384);
5466 test_align_is_size!(32768);
5467 test_align_is_size!(65536);
5468 test_align_is_size!(131072);
5469 test_align_is_size!(262144);
5470 test_align_is_size!(524288);
5471 test_align_is_size!(1048576);
5472 test_align_is_size!(2097152);
5473 test_align_is_size!(4194304);
5474 test_align_is_size!(8388608);
5475 test_align_is_size!(16777216);
5476 test_align_is_size!(33554432);
5477 test_align_is_size!(67108864);
5479 test_align_is_size!(134217728);
5480 test_align_is_size!(268435456);
5481 }
5482
5483 /// Tests of when a sized `DstLayout` is extended with a DST field.
5484 #[test]
5485 fn test_dst_layout_extend_sized_with_dst() {
        // Test that, for all combinations of real-world alignments and
        // `repr_packed` values, the extension of a sized `DstLayout` with a
        // DST field correctly computes the trailing offset in the composite
5489 // layout.
5490
5491 let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap());
5492 let packs = core::iter::once(None).chain(aligns.clone().map(Some));
5493
5494 for align in aligns {
5495 for pack in packs.clone() {
5496 let base = DstLayout::for_type::<u8>();
5497 let elem_size = 42;
5498 let trailing_field_offset = 11;
5499
5500 let trailing_field = DstLayout {
5501 align,
5502 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5503 _elem_size: elem_size,
                        _offset: trailing_field_offset,
5505 }),
5506 };
5507
5508 let composite = base.extend(trailing_field, pack);
5509
5510 let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get();
5511
5512 let align = align.get().min(max_align);
5513
5514 assert_eq!(
5515 composite,
5516 DstLayout {
5517 align: NonZeroUsize::new(align).unwrap(),
5518 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5519 _elem_size: elem_size,
5520 _offset: align + trailing_field_offset,
5521 }),
5522 }
5523 )
5524 }
5525 }
5526 }
5527
5528 /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the
5529 /// expected amount of trailing padding.
5530 #[test]
5531 fn test_dst_layout_pad_to_align_with_sized() {
5532 // For all valid alignments `align`, construct a one-byte layout aligned
5533 // to `align`, call `pad_to_align`, and assert that the size of the
5534 // resulting layout is equal to `align`.
5535 for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
5536 let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } };
5537
5538 assert_eq!(
5539 layout.pad_to_align(),
5540 DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } }
5541 );
5542 }
5543
5544 // Test explicitly-provided combinations of unpadded and padded
5545 // counterparts.
5546
5547 macro_rules! test {
5548 (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr }
5549 => padded { size: $padded_size:expr, align: $padded_align:expr }) => {
5550 let unpadded = DstLayout {
5551 align: NonZeroUsize::new($unpadded_align).unwrap(),
5552 size_info: SizeInfo::Sized { _size: $unpadded_size },
5553 };
5554 let padded = unpadded.pad_to_align();
5555
5556 assert_eq!(
5557 padded,
5558 DstLayout {
5559 align: NonZeroUsize::new($padded_align).unwrap(),
5560 size_info: SizeInfo::Sized { _size: $padded_size },
5561 }
5562 );
5563 };
5564 }
5565
5566 test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 });
5567 test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 });
5568 test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 });
5569 test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 });
5570 test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 });
5571 test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 });
5572 test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 });
5573 test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 });
5574 test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 });
5575
5576 let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get();
5577
5578 test!(unpadded { size: 1, align: current_max_align }
5579 => padded { size: current_max_align, align: current_max_align });
5580
5581 test!(unpadded { size: current_max_align + 1, align: current_max_align }
5582 => padded { size: current_max_align * 2, align: current_max_align });
5583 }
5584
5585 /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op.
5586 #[test]
5587 fn test_dst_layout_pad_to_align_with_dst() {
5588 for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
5589 for offset in 0..10 {
5590 for elem_size in 0..10 {
5591 let layout = DstLayout {
5592 align,
5593 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5594 _offset: offset,
5595 _elem_size: elem_size,
5596 }),
5597 };
5598 assert_eq!(layout.pad_to_align(), layout);
5599 }
5600 }
5601 }
5602 }
5603
5604 // This test takes a long time when running under Miri, so we skip it in
5605 // that case. This is acceptable because this is a logic test that doesn't
5606 // attempt to expose UB.
5607 #[test]
5608 #[cfg_attr(miri, ignore)]
    fn test_validate_cast_and_convert_metadata() {
5610 impl From<usize> for SizeInfo {
5611 fn from(_size: usize) -> SizeInfo {
5612 SizeInfo::Sized { _size }
5613 }
5614 }
5615
5616 impl From<(usize, usize)> for SizeInfo {
5617 fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo {
5618 SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
5619 }
5620 }
5621
5622 fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout {
5623 DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() }
5624 }
5625
5626 /// This macro accepts arguments in the form of:
5627 ///
        ///     layout(_, _).validate(_, _, _), Ok(Some((_, _)))
        ///            |  |           |  |  |            |  |
        ///     size --+  |           |  |  |            |  |
        ///     align ----+           |  |  |            |  |
        ///     addr -----------------+  |  |            |  |
        ///     bytes_len ---------------+  |            |  |
        ///     cast_type ------------------+            |  |
        ///     elems -----------------------------------+  |
        ///     split_at -----------------------------------+
5638 ///
5639 /// `.validate` is shorthand for `.validate_cast_and_convert_metadata`
5640 /// for brevity.
5641 ///
5642 /// Each argument can either be an iterator or a wildcard. Each
5643 /// wildcarded variable is implicitly replaced by an iterator over a
5644 /// representative sample of values for that variable. Each `test!`
5645 /// invocation iterates over every combination of values provided by
5646 /// each variable's iterator (ie, the cartesian product) and validates
5647 /// that the results are expected.
5648 ///
5649 /// The final argument uses the same syntax, but it has a different
5650 /// meaning:
5651 /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to
5652 /// `assert_matches!` to validate the computed result for each
5653 /// combination of input values.
5654 /// - If it is `Err(msg)`, then `test!` validates that the call to
5655 /// `validate_cast_and_convert_metadata` panics with the given panic
5656 /// message.
5657 ///
5658 /// Note that the meta-variables that match these variables have the
5659 /// `tt` type, and some valid expressions are not valid `tt`s (such as
5660 /// `a..b`). In this case, wrap the expression in parentheses, and it
5661 /// will become valid `tt`.
5662 macro_rules! test {
5663 ($(:$sizes:expr =>)?
5664 layout($size:tt, $align:tt)
5665 .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
5666 ) => {
5667 itertools::iproduct!(
5668 test!(@generate_size $size),
5669 test!(@generate_align $align),
5670 test!(@generate_usize $addr),
5671 test!(@generate_usize $bytes_len),
5672 test!(@generate_cast_type $cast_type)
5673 ).for_each(|(size_info, align, addr, bytes_len, cast_type)| {
5674 // Temporarily disable the panic hook installed by the test
5675 // harness. If we don't do this, all panic messages will be
5676 // kept in an internal log. On its own, this isn't a
5677 // problem, but if a non-caught panic ever happens (ie, in
5678 // code later in this test not in this macro), all of the
5679 // previously-buffered messages will be dumped, hiding the
5680 // real culprit.
5681 let previous_hook = std::panic::take_hook();
5682 // I don't understand why, but this seems to be required in
5683 // addition to the previous line.
5684 std::panic::set_hook(Box::new(|_| {}));
5685 let actual = std::panic::catch_unwind(|| {
5686 layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
5687 }).map_err(|d| {
5688 *d.downcast::<&'static str>().expect("expected string panic message").as_ref()
5689 });
5690 std::panic::set_hook(previous_hook);
5691
5692 assert_matches::assert_matches!(
5693 actual, $expect,
5694 "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
5695 );
5696 });
5697 };
5698 (@generate_usize _) => { 0..8 };
5699 // Generate sizes for both Sized and !Sized types.
5700 (@generate_size _) => {
5701 test!(@generate_size (_)).chain(test!(@generate_size (_, _)))
5702 };
5703 // Generate sizes for both Sized and !Sized types by chaining
5704 // specified iterators for each.
5705 (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => {
5706 test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes))
5707 };
5708 // Generate sizes for Sized types.
5709 (@generate_size (_)) => { test!(@generate_size (0..8)) };
5710 (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) };
5711 // Generate sizes for !Sized types.
5712 (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => {
5713 itertools::iproduct!(
5714 test!(@generate_min_size $min_sizes),
5715 test!(@generate_elem_size $elem_sizes)
5716 ).map(Into::<SizeInfo>::into)
5717 };
5718 (@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) };
5719 (@generate_min_size _) => { 0..8 };
5720 (@generate_elem_size _) => { 1..8 };
5721 (@generate_align _) => { [1, 2, 4, 8, 16] };
5722 (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) };
5723 (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
5724 (@generate_cast_type $variant:ident) => { [_CastType::$variant] };
5725 // Some expressions need to be wrapped in parentheses in order to be
5726 // valid `tt`s (required by the top match pattern). See the comment
5727 // below for more details. This arm removes these parentheses to
5728 // avoid generating an `unused_parens` warning.
5729 (@$_:ident ($vals:expr)) => { $vals };
5730 (@$_:ident $vals:expr) => { $vals };
5731 }
5732
5733 const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14];
5734 const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15];
5735
5736 // base_size is too big for the memory region.
5737 test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None));
5738 test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None));
5739
5740 // addr is unaligned for prefix cast
5741 test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));
5743
5744 // addr is aligned, but end of buffer is unaligned for suffix cast
test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None));
5747
5748 // Unfortunately, these constants cannot easily be used in the
// implementation of `validate_cast_and_convert_metadata`, since
// `panic!` requires a string-literal message, not an arbitrary
// expression.
5751 //
5752 // It's important that these messages be in a separate module. If they
5753 // were at the function's top level, we'd pass them to `test!` as, e.g.,
5754 // `Err(TRAILING)`, which would run into a subtle Rust footgun - the
5755 // `TRAILING` identifier would be treated as a pattern to match rather
5756 // than a value to check for equality.
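//
// For example, if `TRAILING` were a `const` at function scope, a typo
// like `Err(TRALING)` in a pattern would silently become a catch-all
// binding named `TRALING` that matches *any* message. A path pattern
// like `Err(msgs::TRAILING)` can't degrade that way: paths in patterns
// must resolve to a constant or variant, so a typo is a compile error.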
5757 mod msgs {
5758 pub(super) const TRAILING: &str =
5759 "attempted to cast to slice type with zero-sized element";
5760 pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
5761 }
5762
5763 // casts with ZST trailing element types are unsupported
test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING));
5765
5766 // addr + bytes_len must not overflow usize
5767 test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW));
5768 test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
5769 test!(
5770 layout(_, _).validate(
5771 [usize::MAX / 2 + 1, usize::MAX],
5772 [usize::MAX / 2 + 1, usize::MAX],
5773 _
5774 ),
5775 Err(msgs::OVERFLOW)
5776 );
5777
5778 // Validates that `validate_cast_and_convert_metadata` satisfies its own
5779 // documented safety postconditions, and also a few other properties
5780 // that aren't documented but we want to guarantee anyway.
5781 fn validate_behavior(
5782 (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
5783 ) {
5784 if let Some((elems, split_at)) =
5785 layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
5786 {
5787 let (size_info, align) = (layout.size_info, layout.align);
5788 let debug_str = format!(
5789 "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
5790 );
5791
5792 // If this is a sized type (no trailing slice), then `elems` is
5793 // meaningless, but in practice we set it to 0. Callers are not
5794 // allowed to rely on this, but a lot of math is nicer if
5795 // they're able to, and some callers might accidentally do that.
5796 let sized = matches!(layout.size_info, SizeInfo::Sized { .. });
5797 assert!(!(sized && elems != 0), "{}", debug_str);
5798
5799 let resulting_size = match layout.size_info {
5800 SizeInfo::Sized { _size } => _size,
5801 SizeInfo::SliceDst(TrailingSliceLayout {
5802 _offset: offset,
5803 _elem_size: elem_size,
5804 }) => {
5805 let padded_size = |elems| {
5806 let without_padding = offset + elems * elem_size;
5807 without_padding
5808 + util::core_layout::padding_needed_for(without_padding, align)
5809 };
5810
5811 let resulting_size = padded_size(elems);
5812 // Test that `validate_cast_and_convert_metadata`
5813 // computed the largest possible value that fits in the
5814 // given range.
5815 assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str);
5816 resulting_size
5817 }
5818 };
5819
5820 // Test safety postconditions guaranteed by
5821 // `validate_cast_and_convert_metadata`.
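//
// As a worked example for the suffix case: for a sized layout with
// size 8 and alignment 8, `addr = 4`, and `bytes_len = 20`, a
// successful suffix cast must return `split_at = 20 - 8 = 12`, so
// that the suffix occupies the final 8 bytes and begins at address
// `4 + 12 = 16`, which is 8-aligned.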
5822 assert!(resulting_size <= bytes_len, "{}", debug_str);
5823 match cast_type {
5824 _CastType::_Prefix => {
5825 assert_eq!(addr % align, 0, "{}", debug_str);
5826 assert_eq!(resulting_size, split_at, "{}", debug_str);
5827 }
5828 _CastType::_Suffix => {
5829 assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
5830 assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
5831 }
5832 }
5833 } else {
5834 let min_size = match layout.size_info {
5835 SizeInfo::Sized { _size } => _size,
5836 SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => {
5837 _offset + util::core_layout::padding_needed_for(_offset, layout.align)
5838 }
5839 };
5840
5841 // If a cast is invalid, it is either because...
// 1. there are insufficient bytes in the given region for the type:
let insufficient_bytes = bytes_len < min_size;
// 2. performing the cast would misalign the type:
5845 let base = match cast_type {
5846 _CastType::_Prefix => 0,
5847 _CastType::_Suffix => bytes_len,
5848 };
5849 let misaligned = (base + addr) % layout.align != 0;
5850
5851 assert!(insufficient_bytes || misaligned);
5852 }
5853 }
5854
5855 let sizes = 0..8;
5856 let elem_sizes = 1..8;
5857 let size_infos = sizes
5858 .clone()
5859 .map(Into::<SizeInfo>::into)
5860 .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into));
5861 let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32])
5862 .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0))
5863 .map(|(size_info, align)| layout(size_info, align));
5864 itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
5865 .for_each(validate_behavior);
5866 }
5867
5868 #[test]
5869 #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
5870 fn test_validate_rust_layout() {
5871 use core::ptr::NonNull;
5872
5873 // This test synthesizes pointers with various metadata and uses Rust's
5874 // built-in APIs to confirm that Rust makes decisions about type layout
5875 // which are consistent with what we believe is guaranteed by the
// language. If this test fails, it doesn't just mean our code is
// wrong; it means we're misunderstanding the language's guarantees.
5878
5879 #[derive(Debug)]
5880 struct MacroArgs {
5881 offset: usize,
5882 align: NonZeroUsize,
5883 elem_size: Option<usize>,
5884 }
5885
5886 /// # Safety
5887 ///
5888 /// `test` promises to only call `addr_of_slice_field` on a `NonNull<T>`
5889 /// which points to a valid `T`.
5890 ///
5891 /// `with_elems` must produce a pointer which points to a valid `T`.
5892 fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>(
5893 args: MacroArgs,
5894 with_elems: W,
5895 addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>,
5896 ) {
5897 let dst = args.elem_size.is_some();
5898 let layout = {
5899 let size_info = match args.elem_size {
5900 Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
5901 _offset: args.offset,
5902 _elem_size: elem_size,
5903 }),
5904 None => SizeInfo::Sized {
5905 // Rust only supports types whose sizes are a multiple
5906 // of their alignment. If the macro created a type like
5907 // this:
5908 //
5909 // #[repr(C, align(2))]
5910 // struct Foo([u8; 1]);
5911 //
5912 // ...then Rust will automatically round the type's size
5913 // up to 2.
5914 _size: args.offset
5915 + util::core_layout::padding_needed_for(args.offset, args.align),
5916 },
5917 };
5918 DstLayout { size_info, align: args.align }
5919 };
5920
5921 for elems in 0..128 {
5922 let ptr = with_elems(elems);
5923
5924 if let Some(addr_of_slice_field) = addr_of_slice_field {
5925 let slc_field_ptr = addr_of_slice_field(ptr).as_ptr();
5926 // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to
5927 // the same valid Rust object.
5928 let offset: usize =
5929 unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() };
5930 assert_eq!(offset, args.offset);
5931 }
5932
5933 // SAFETY: `ptr` points to a valid `T`.
5934 let (size, align) = unsafe {
5935 (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr()))
5936 };
5937
5938 // Avoid expensive allocation when running under Miri.
5939 let assert_msg = if !cfg!(miri) {
5940 format!("\n{args:?}\nsize:{size}, align:{align}")
5941 } else {
5942 String::new()
5943 };
5944
5945 let without_padding =
5946 args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0);
5947 assert!(size >= without_padding, "{}", assert_msg);
5948 assert_eq!(align, args.align.get(), "{}", assert_msg);
5949
5950 // This encodes the most important part of the test: our
5951 // understanding of how Rust determines the layout of repr(C)
5952 // types. Sized repr(C) types are trivial, but DST types have
5953 // some subtlety. Note that:
5954 // - For sized types, `without_padding` is just the size of the
5955 // type that we constructed for `Foo`. Since we may have
5956 // requested a larger alignment, `Foo` may actually be larger
5957 // than this, hence `padding_needed_for`.
5958 // - For unsized types, `without_padding` is dynamically
5959 // computed from the offset, the element size, and element
5960 // count. We expect that the size of the object should be
5961 // `offset + elem_size * elems` rounded up to the next
5962 // alignment.
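//
// Worked example: with `offset = 1`, `elem_size = 2`, and `align = 4`,
// a value with `elems = 3` has `without_padding = 1 + 3 * 2 = 7`,
// which is rounded up to an expected size of 8.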
5963 let expected_size = without_padding
5964 + util::core_layout::padding_needed_for(without_padding, args.align);
5965 assert_eq!(expected_size, size, "{}", assert_msg);
5966
5967 // For zero-sized element types,
5968 // `validate_cast_and_convert_metadata` just panics, so we skip
5969 // testing those types.
5970 if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) {
5971 let addr = ptr.addr().get();
5972 let (got_elems, got_split_at) = layout
5973 .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix)
5974 .unwrap();
5975 // Avoid expensive allocation when running under Miri.
5976 let assert_msg = if !cfg!(miri) {
5977 format!(
5978 "{}\nvalidate_cast_and_convert_metadata({addr}, {size})",
5979 assert_msg
5980 )
5981 } else {
5982 String::new()
5983 };
5984 assert_eq!(got_split_at, size, "{}", assert_msg);
5985 if dst {
5986 assert!(got_elems >= elems, "{}", assert_msg);
5987 if got_elems != elems {
5988 // If `validate_cast_and_convert_metadata`
5989 // returned more elements than `elems`, that
5990 // means that `elems` is not the maximum number
5991 // of elements that can fit in `size` - in other
5992 // words, there is enough padding at the end of
5993 // the value to fit at least one more element.
5994 // If we use this metadata to synthesize a
5995 // pointer, despite having a different element
5996 // count, we still expect it to have the same
5997 // size.
5998 let got_ptr = with_elems(got_elems);
5999 // SAFETY: `got_ptr` is a pointer to a valid `T`.
6000 let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) };
6001 assert_eq!(size_of_got_ptr, size, "{}", assert_msg);
6002 }
6003 } else {
6004 // For sized casts, the returned element value is
6005 // technically meaningless, and we don't guarantee any
6006 // particular value. In practice, it's always zero.
6007 assert_eq!(got_elems, 0, "{}", assert_msg)
6008 }
6009 }
6010 }
6011 }
6012
6013 macro_rules! validate_against_rust {
6014 ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{
6015 #[repr(C, align($align))]
6016 struct Foo([u8; $offset]$(, [[u8; $elem_size]])?);
6017
6018 let args = MacroArgs {
6019 offset: $offset,
6020 align: $align.try_into().unwrap(),
6021 elem_size: {
6022 #[allow(unused)]
6023 let ret = None::<usize>;
6024 $(let ret = Some($elem_size);)?
6025 ret
6026 }
6027 };
6028
6029 #[repr(C, align($align))]
6030 struct FooAlign;
6031 // Create an aligned buffer to use in order to synthesize
6032 // pointers to `Foo`. We don't ever load values from these
6033 // pointers - we just do arithmetic on them - so having a "real"
6034 // block of memory as opposed to a validly-aligned-but-dangling
6035 // pointer is only necessary to make Miri happy since we run it
6036 // with "strict provenance" checking enabled.
6037 let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]);
6038 let with_elems = |elems| {
6039 let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems);
6040 #[allow(clippy::as_conversions)]
6041 NonNull::new(slc.as_ptr() as *mut Foo).unwrap()
6042 };
6043 let addr_of_slice_field = {
6044 #[allow(unused)]
6045 let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>;
6046 $(
6047 // SAFETY: `test` promises to only call `f` with a `ptr`
6048 // to a valid `Foo`.
6049 let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe {
6050 NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>()
6051 });
6052 let _ = $elem_size;
6053 )?
6054 f
6055 };
6056
6057 test::<Foo, _>(args, with_elems, addr_of_slice_field);
6058 }};
6059 }
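
// As an illustration, `validate_against_rust!(1, 2, 4)` expands
// (roughly) to a test over:
//
//     #[repr(C, align(2))]
//     struct Foo([u8; 1], [[u8; 4]]);
//
// i.e., a DST with a 1-byte sized prefix, alignment 2, and a trailing
// slice whose elements are each 4 bytes.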
6060
// Every permutation of:
// - offset in 0..=4
// - align in {1, 2, 4, 8, 16}
// - elem_size in 0..=4 (plus no elem_size)
6065 validate_against_rust!(0, 1);
6066 validate_against_rust!(0, 1, 0);
6067 validate_against_rust!(0, 1, 1);
6068 validate_against_rust!(0, 1, 2);
6069 validate_against_rust!(0, 1, 3);
6070 validate_against_rust!(0, 1, 4);
6071 validate_against_rust!(0, 2);
6072 validate_against_rust!(0, 2, 0);
6073 validate_against_rust!(0, 2, 1);
6074 validate_against_rust!(0, 2, 2);
6075 validate_against_rust!(0, 2, 3);
6076 validate_against_rust!(0, 2, 4);
6077 validate_against_rust!(0, 4);
6078 validate_against_rust!(0, 4, 0);
6079 validate_against_rust!(0, 4, 1);
6080 validate_against_rust!(0, 4, 2);
6081 validate_against_rust!(0, 4, 3);
6082 validate_against_rust!(0, 4, 4);
6083 validate_against_rust!(0, 8);
6084 validate_against_rust!(0, 8, 0);
6085 validate_against_rust!(0, 8, 1);
6086 validate_against_rust!(0, 8, 2);
6087 validate_against_rust!(0, 8, 3);
6088 validate_against_rust!(0, 8, 4);
6089 validate_against_rust!(0, 16);
6090 validate_against_rust!(0, 16, 0);
6091 validate_against_rust!(0, 16, 1);
6092 validate_against_rust!(0, 16, 2);
6093 validate_against_rust!(0, 16, 3);
6094 validate_against_rust!(0, 16, 4);
6095 validate_against_rust!(1, 1);
6096 validate_against_rust!(1, 1, 0);
6097 validate_against_rust!(1, 1, 1);
6098 validate_against_rust!(1, 1, 2);
6099 validate_against_rust!(1, 1, 3);
6100 validate_against_rust!(1, 1, 4);
6101 validate_against_rust!(1, 2);
6102 validate_against_rust!(1, 2, 0);
6103 validate_against_rust!(1, 2, 1);
6104 validate_against_rust!(1, 2, 2);
6105 validate_against_rust!(1, 2, 3);
6106 validate_against_rust!(1, 2, 4);
6107 validate_against_rust!(1, 4);
6108 validate_against_rust!(1, 4, 0);
6109 validate_against_rust!(1, 4, 1);
6110 validate_against_rust!(1, 4, 2);
6111 validate_against_rust!(1, 4, 3);
6112 validate_against_rust!(1, 4, 4);
6113 validate_against_rust!(1, 8);
6114 validate_against_rust!(1, 8, 0);
6115 validate_against_rust!(1, 8, 1);
6116 validate_against_rust!(1, 8, 2);
6117 validate_against_rust!(1, 8, 3);
6118 validate_against_rust!(1, 8, 4);
6119 validate_against_rust!(1, 16);
6120 validate_against_rust!(1, 16, 0);
6121 validate_against_rust!(1, 16, 1);
6122 validate_against_rust!(1, 16, 2);
6123 validate_against_rust!(1, 16, 3);
6124 validate_against_rust!(1, 16, 4);
6125 validate_against_rust!(2, 1);
6126 validate_against_rust!(2, 1, 0);
6127 validate_against_rust!(2, 1, 1);
6128 validate_against_rust!(2, 1, 2);
6129 validate_against_rust!(2, 1, 3);
6130 validate_against_rust!(2, 1, 4);
6131 validate_against_rust!(2, 2);
6132 validate_against_rust!(2, 2, 0);
6133 validate_against_rust!(2, 2, 1);
6134 validate_against_rust!(2, 2, 2);
6135 validate_against_rust!(2, 2, 3);
6136 validate_against_rust!(2, 2, 4);
6137 validate_against_rust!(2, 4);
6138 validate_against_rust!(2, 4, 0);
6139 validate_against_rust!(2, 4, 1);
6140 validate_against_rust!(2, 4, 2);
6141 validate_against_rust!(2, 4, 3);
6142 validate_against_rust!(2, 4, 4);
6143 validate_against_rust!(2, 8);
6144 validate_against_rust!(2, 8, 0);
6145 validate_against_rust!(2, 8, 1);
6146 validate_against_rust!(2, 8, 2);
6147 validate_against_rust!(2, 8, 3);
6148 validate_against_rust!(2, 8, 4);
6149 validate_against_rust!(2, 16);
6150 validate_against_rust!(2, 16, 0);
6151 validate_against_rust!(2, 16, 1);
6152 validate_against_rust!(2, 16, 2);
6153 validate_against_rust!(2, 16, 3);
6154 validate_against_rust!(2, 16, 4);
6155 validate_against_rust!(3, 1);
6156 validate_against_rust!(3, 1, 0);
6157 validate_against_rust!(3, 1, 1);
6158 validate_against_rust!(3, 1, 2);
6159 validate_against_rust!(3, 1, 3);
6160 validate_against_rust!(3, 1, 4);
6161 validate_against_rust!(3, 2);
6162 validate_against_rust!(3, 2, 0);
6163 validate_against_rust!(3, 2, 1);
6164 validate_against_rust!(3, 2, 2);
6165 validate_against_rust!(3, 2, 3);
6166 validate_against_rust!(3, 2, 4);
6167 validate_against_rust!(3, 4);
6168 validate_against_rust!(3, 4, 0);
6169 validate_against_rust!(3, 4, 1);
6170 validate_against_rust!(3, 4, 2);
6171 validate_against_rust!(3, 4, 3);
6172 validate_against_rust!(3, 4, 4);
6173 validate_against_rust!(3, 8);
6174 validate_against_rust!(3, 8, 0);
6175 validate_against_rust!(3, 8, 1);
6176 validate_against_rust!(3, 8, 2);
6177 validate_against_rust!(3, 8, 3);
6178 validate_against_rust!(3, 8, 4);
6179 validate_against_rust!(3, 16);
6180 validate_against_rust!(3, 16, 0);
6181 validate_against_rust!(3, 16, 1);
6182 validate_against_rust!(3, 16, 2);
6183 validate_against_rust!(3, 16, 3);
6184 validate_against_rust!(3, 16, 4);
6185 validate_against_rust!(4, 1);
6186 validate_against_rust!(4, 1, 0);
6187 validate_against_rust!(4, 1, 1);
6188 validate_against_rust!(4, 1, 2);
6189 validate_against_rust!(4, 1, 3);
6190 validate_against_rust!(4, 1, 4);
6191 validate_against_rust!(4, 2);
6192 validate_against_rust!(4, 2, 0);
6193 validate_against_rust!(4, 2, 1);
6194 validate_against_rust!(4, 2, 2);
6195 validate_against_rust!(4, 2, 3);
6196 validate_against_rust!(4, 2, 4);
6197 validate_against_rust!(4, 4);
6198 validate_against_rust!(4, 4, 0);
6199 validate_against_rust!(4, 4, 1);
6200 validate_against_rust!(4, 4, 2);
6201 validate_against_rust!(4, 4, 3);
6202 validate_against_rust!(4, 4, 4);
6203 validate_against_rust!(4, 8);
6204 validate_against_rust!(4, 8, 0);
6205 validate_against_rust!(4, 8, 1);
6206 validate_against_rust!(4, 8, 2);
6207 validate_against_rust!(4, 8, 3);
6208 validate_against_rust!(4, 8, 4);
6209 validate_against_rust!(4, 16);
6210 validate_against_rust!(4, 16, 0);
6211 validate_against_rust!(4, 16, 1);
6212 validate_against_rust!(4, 16, 2);
6213 validate_against_rust!(4, 16, 3);
6214 validate_against_rust!(4, 16, 4);
6215 }
6216
6217 #[test]
6218 fn test_known_layout() {
6219 // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
6220 // Test that `PhantomData<$ty>` has the same layout as `()` regardless
6221 // of `$ty`.
6222 macro_rules! test {
6223 ($ty:ty, $expect:expr) => {
6224 let expect = $expect;
6225 assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
6226 assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
6227 assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
6228 };
6229 }
6230
6231 let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
6232 align: NonZeroUsize::new(align).unwrap(),
6233 size_info: match _trailing_slice_elem_size {
6234 None => SizeInfo::Sized { _size: offset },
6235 Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
6236 _offset: offset,
6237 _elem_size: elem_size,
6238 }),
6239 },
6240 };
6241
6242 test!((), layout(0, 1, None));
6243 test!(u8, layout(1, 1, None));
6244 // Use `align_of` because `u64` alignment may be smaller than 8 on some
6245 // platforms.
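// (For example, `align_of::<u64>()` is 4 on 32-bit x86 Linux targets.)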
6246 test!(u64, layout(8, mem::align_of::<u64>(), None));
6247 test!(AU64, layout(8, 8, None));
6248
6249 test!(Option<&'static ()>, usize::LAYOUT);
6250
6251 test!([()], layout(0, 1, Some(0)));
6252 test!([u8], layout(0, 1, Some(1)));
6253 test!(str, layout(0, 1, Some(1)));
6254 }
6255
6256 #[cfg(feature = "derive")]
6257 #[test]
6258 fn test_known_layout_derive() {
6259 // In this and other files (`late_compile_pass.rs`,
6260 // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
6261 // modes of `derive(KnownLayout)` for the following combination of
6262 // properties:
6263 //
6264 // +------------+--------------------------------------+-----------+
6265 // | | trailing field properties | |
6266 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6267 // |------------+----------+----------------+----------+-----------|
6268 // | N | N | N | N | KL00 |
6269 // | N | N | N | Y | KL01 |
6270 // | N | N | Y | N | KL02 |
6271 // | N | N | Y | Y | KL03 |
6272 // | N | Y | N | N | KL04 |
6273 // | N | Y | N | Y | KL05 |
6274 // | N | Y | Y | N | KL06 |
6275 // | N | Y | Y | Y | KL07 |
6276 // | Y | N | N | N | KL08 |
6277 // | Y | N | N | Y | KL09 |
6278 // | Y | N | Y | N | KL10 |
6279 // | Y | N | Y | Y | KL11 |
6280 // | Y | Y | N | N | KL12 |
6281 // | Y | Y | N | Y | KL13 |
6282 // | Y | Y | Y | N | KL14 |
6283 // | Y | Y | Y | Y | KL15 |
6284 // +------------+----------+----------------+----------+-----------+
6285
6286 struct NotKnownLayout<T = ()> {
6287 _t: T,
6288 }
6289
6290 #[derive(KnownLayout)]
6291 #[repr(C)]
6292 struct AlignSize<const ALIGN: usize, const SIZE: usize>
6293 where
6294 elain::Align<ALIGN>: elain::Alignment,
6295 {
6296 _align: elain::Align<ALIGN>,
6297 _size: [u8; SIZE],
6298 }
6299
6300 type AU16 = AlignSize<2, 2>;
6301 type AU32 = AlignSize<4, 4>;
6302
6303 fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
6304
6305 let sized_layout = |align, size| DstLayout {
6306 align: NonZeroUsize::new(align).unwrap(),
6307 size_info: SizeInfo::Sized { _size: size },
6308 };
6309
6310 let unsized_layout = |align, elem_size, offset| DstLayout {
6311 align: NonZeroUsize::new(align).unwrap(),
6312 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
6313 _offset: offset,
6314 _elem_size: elem_size,
6315 }),
6316 };
6317
6318 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6319 // | N | N | N | Y | KL01 |
6320 #[derive(KnownLayout)]
6321 struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6322
6323 let expected = DstLayout::for_type::<KL01>();
6324
6325 assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
6326 assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
6327
6328 // ...with `align(N)`:
6329 #[derive(KnownLayout)]
6330 #[repr(align(64))]
6331 struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6332
6333 let expected = DstLayout::for_type::<KL01Align>();
6334
6335 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
6336 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6337
6338 // ...with `packed`:
6339 #[derive(KnownLayout)]
6340 #[repr(packed)]
6341 struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6342
6343 let expected = DstLayout::for_type::<KL01Packed>();
6344
6345 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6346 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6347
6348 // ...with `packed(N)`:
6349 #[derive(KnownLayout)]
6350 #[repr(packed(2))]
6351 struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6352
6353 assert_impl_all!(KL01PackedN: KnownLayout);
6354
6355 let expected = DstLayout::for_type::<KL01PackedN>();
6356
6357 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6358 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6359
6360 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6361 // | N | N | Y | Y | KL03 |
6362 #[derive(KnownLayout)]
6363 struct KL03(NotKnownLayout, u8);
6364
6365 let expected = DstLayout::for_type::<KL03>();
6366
6367 assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6368 assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6369
// ...with `align(N)`:
6371 #[derive(KnownLayout)]
6372 #[repr(align(64))]
6373 struct KL03Align(NotKnownLayout<AU32>, u8);
6374
6375 let expected = DstLayout::for_type::<KL03Align>();
6376
6377 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6378 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6379
// ...with `packed`:
6381 #[derive(KnownLayout)]
6382 #[repr(packed)]
6383 struct KL03Packed(NotKnownLayout<AU32>, u8);
6384
6385 let expected = DstLayout::for_type::<KL03Packed>();
6386
6387 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6388 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6389
// ...with `packed(N)`:
6391 #[derive(KnownLayout)]
6392 #[repr(packed(2))]
6393 struct KL03PackedN(NotKnownLayout<AU32>, u8);
6394
6395 assert_impl_all!(KL03PackedN: KnownLayout);
6396
6397 let expected = DstLayout::for_type::<KL03PackedN>();
6398
6399 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6400 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6401
6402 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6403 // | N | Y | N | Y | KL05 |
6404 #[derive(KnownLayout)]
6405 struct KL05<T>(u8, T);
6406
6407 fn _test_kl05<T>(t: T) -> impl KnownLayout {
6408 KL05(0u8, t)
6409 }
6410
6411 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6412 // | N | Y | Y | Y | KL07 |
6413 #[derive(KnownLayout)]
6414 struct KL07<T: KnownLayout>(u8, T);
6415
6416 fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6417 let _ = KL07(0u8, t);
6418 }
6419
6420 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6421 // | Y | N | Y | N | KL10 |
6422 #[derive(KnownLayout)]
6423 #[repr(C)]
6424 struct KL10(NotKnownLayout<AU32>, [u8]);
6425
6426 let expected = DstLayout::new_zst(None)
6427 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6428 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6429 .pad_to_align();
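// Reading the composition above: starting from a zero-sized layout,
// the 4-byte, 4-aligned `NotKnownLayout<AU32>` field lands at offset
// 0, the `[u8]` trailing slice then begins at offset 4 with element
// size 1, and `pad_to_align` has nothing left to round up; hence
// `unsized_layout(4, 1, 4)` below.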
6430
6431 assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6432 assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
6433
6434 // ...with `align(N)`:
6435 #[derive(KnownLayout)]
6436 #[repr(C, align(64))]
6437 struct KL10Align(NotKnownLayout<AU32>, [u8]);
6438
6439 let repr_align = NonZeroUsize::new(64);
6440
6441 let expected = DstLayout::new_zst(repr_align)
6442 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6443 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6444 .pad_to_align();
6445
6446 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6447 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
6448
6449 // ...with `packed`:
6450 #[derive(KnownLayout)]
6451 #[repr(C, packed)]
6452 struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6453
6454 let repr_packed = NonZeroUsize::new(1);
6455
6456 let expected = DstLayout::new_zst(None)
6457 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6458 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6459 .pad_to_align();
6460
6461 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6462 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
6463
6464 // ...with `packed(N)`:
6465 #[derive(KnownLayout)]
6466 #[repr(C, packed(2))]
6467 struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6468
6469 let repr_packed = NonZeroUsize::new(2);
6470
6471 let expected = DstLayout::new_zst(None)
6472 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6473 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6474 .pad_to_align();
6475
6476 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6477 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6478
6479 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6480 // | Y | N | Y | Y | KL11 |
6481 #[derive(KnownLayout)]
6482 #[repr(C)]
6483 struct KL11(NotKnownLayout<AU64>, u8);
6484
6485 let expected = DstLayout::new_zst(None)
6486 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6487 .extend(<u8 as KnownLayout>::LAYOUT, None)
6488 .pad_to_align();
6489
6490 assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6491 assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6492
6493 // ...with `align(N)`:
6494 #[derive(KnownLayout)]
6495 #[repr(C, align(64))]
6496 struct KL11Align(NotKnownLayout<AU64>, u8);
6497
6498 let repr_align = NonZeroUsize::new(64);
6499
6500 let expected = DstLayout::new_zst(repr_align)
6501 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6502 .extend(<u8 as KnownLayout>::LAYOUT, None)
6503 .pad_to_align();
6504
6505 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6506 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6507
6508 // ...with `packed`:
6509 #[derive(KnownLayout)]
6510 #[repr(C, packed)]
6511 struct KL11Packed(NotKnownLayout<AU64>, u8);
6512
6513 let repr_packed = NonZeroUsize::new(1);
6514
6515 let expected = DstLayout::new_zst(None)
6516 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6517 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6518 .pad_to_align();
6519
6520 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6521 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6522
6523 // ...with `packed(N)`:
6524 #[derive(KnownLayout)]
6525 #[repr(C, packed(2))]
6526 struct KL11PackedN(NotKnownLayout<AU64>, u8);
6527
6528 let repr_packed = NonZeroUsize::new(2);
6529
6530 let expected = DstLayout::new_zst(None)
6531 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6532 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6533 .pad_to_align();
6534
6535 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6536 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6537
6538 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6539 // | Y | Y | Y | N | KL14 |
6540 #[derive(KnownLayout)]
6541 #[repr(C)]
6542 struct KL14<T: ?Sized + KnownLayout>(u8, T);
6543
6544 fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6545 _assert_kl(kl)
6546 }
6547
6548 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6549 // | Y | Y | Y | Y | KL15 |
6550 #[derive(KnownLayout)]
6551 #[repr(C)]
6552 struct KL15<T: KnownLayout>(u8, T);
6553
6554 fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6555 let _ = KL15(0u8, t);
6556 }
6557
6558 // Test a variety of combinations of field types:
6559 // - ()
6560 // - u8
6561 // - AU16
6562 // - [()]
6563 // - [u8]
6564 // - [AU16]
6565
6566 #[allow(clippy::upper_case_acronyms)]
6567 #[derive(KnownLayout)]
6568 #[repr(C)]
6569 struct KLTU<T, U: ?Sized>(T, U);
6570
6571 assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6572
6573 assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6574
6575 assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6576
6577 assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
6578
6579 assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6580
6581 assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
6582
6583 assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6584
6585 assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6586
6587 assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6588
6589 assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
6590
6591 assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6592
6593 assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6594
6595 assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6596
6597 assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6598
6599 assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6600
6601 assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
6602
6603 assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
6604
6605 assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6606
6607 // Test a variety of field counts.
6608
6609 #[derive(KnownLayout)]
6610 #[repr(C)]
6611 struct KLF0;
6612
6613 assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6614
6615 #[derive(KnownLayout)]
6616 #[repr(C)]
6617 struct KLF1([u8]);
6618
6619 assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6620
6621 #[derive(KnownLayout)]
6622 #[repr(C)]
6623 struct KLF2(NotKnownLayout<u8>, [u8]);
6624
6625 assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6626
6627 #[derive(KnownLayout)]
6628 #[repr(C)]
6629 struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6630
6631 assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6632
6633 #[derive(KnownLayout)]
6634 #[repr(C)]
6635 struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6636
6637 assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
6638 }
6639
6640 #[test]
6641 fn test_object_safety() {
6642 fn _takes_from_zeroes(_: &dyn FromZeroes) {}
6643 fn _takes_from_bytes(_: &dyn FromBytes) {}
6644 fn _takes_unaligned(_: &dyn Unaligned) {}
6645 }
6646
6647 #[test]
6648 fn test_from_zeroes_only() {
6649 // Test types that implement `FromZeroes` but not `FromBytes`.
6650
6651 assert!(!bool::new_zeroed());
6652 assert_eq!(char::new_zeroed(), '\0');
6653
6654 #[cfg(feature = "alloc")]
6655 {
6656 assert_eq!(bool::new_box_zeroed(), Box::new(false));
6657 assert_eq!(char::new_box_zeroed(), Box::new('\0'));
6658
6659 assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
6660 assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);
6661
6662 assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
6663 assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
6664 }
6665
6666 let mut string = "hello".to_string();
6667 let s: &mut str = string.as_mut();
6668 assert_eq!(s, "hello");
6669 s.zero();
6670 assert_eq!(s, "\0\0\0\0\0");
6671 }
6672
6673 #[test]
6674 fn test_read_write() {
6675 const VAL: u64 = 0x12345678;
6676 #[cfg(target_endian = "big")]
6677 const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6678 #[cfg(target_endian = "little")]
6679 const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6680
6681 // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6682
6683 assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
6684 // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6685 // zeroes.
6686 let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6687 assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
6688 assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
6689 // The first 8 bytes are all zeroes and the second 8 bytes are from
// `VAL_BYTES`.
6691 let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6692 assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
6693 assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
6694
6695 // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`.
6696
6697 let mut bytes = [0u8; 8];
6698 assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
6699 assert_eq!(bytes, VAL_BYTES);
6700 let mut bytes = [0u8; 16];
6701 assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
6702 let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6703 assert_eq!(bytes, want);
6704 let mut bytes = [0u8; 16];
6705 assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
6706 let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6707 assert_eq!(bytes, want);
6708 }
6709
6710 #[test]
6711 fn test_transmute() {
6712 // Test that memory is transmuted as expected.
6713 let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6714 let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6715 let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
6716 assert_eq!(x, array_of_arrays);
6717 let x: [u8; 8] = transmute!(array_of_arrays);
6718 assert_eq!(x, array_of_u8s);
6719
6720 // Test that the source expression's value is forgotten rather than
6721 // dropped.
6722 #[derive(AsBytes)]
6723 #[repr(transparent)]
6724 struct PanicOnDrop(());
6725 impl Drop for PanicOnDrop {
6726 fn drop(&mut self) {
6727 panic!("PanicOnDrop::drop");
6728 }
6729 }
6730 #[allow(clippy::let_unit_value)]
6731 let _: () = transmute!(PanicOnDrop(()));
6732
6733 // Test that `transmute!` is legal in a const context.
6734 const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
6735 const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
6736 const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
6737 assert_eq!(X, ARRAY_OF_ARRAYS);
6738 }
6739
6740 #[test]
6741 fn test_transmute_ref() {
6742 // Test that memory is transmuted as expected.
6743 let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6744 let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6745 let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s);
6746 assert_eq!(*x, array_of_arrays);
6747 let x: &[u8; 8] = transmute_ref!(&array_of_arrays);
6748 assert_eq!(*x, array_of_u8s);
6749
6750 // Test that `transmute_ref!` is legal in a const context.
6751 const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
6752 const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
6753 #[allow(clippy::redundant_static_lifetimes)]
6754 const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S);
6755 assert_eq!(*X, ARRAY_OF_ARRAYS);
6756
6757 // Test that it's legal to transmute a reference while shrinking the
6758 // lifetime (note that `X` has the lifetime `'static`).
6759 let x: &[u8; 8] = transmute_ref!(X);
6760 assert_eq!(*x, ARRAY_OF_U8S);
6761
6762 // Test that `transmute_ref!` supports decreasing alignment.
6763 let u = AU64(0);
6764 let array = [0, 0, 0, 0, 0, 0, 0, 0];
6765 let x: &[u8; 8] = transmute_ref!(&u);
6766 assert_eq!(*x, array);
6767
6768 // Test that a mutable reference can be turned into an immutable one.
6769 let mut x = 0u8;
6770 #[allow(clippy::useless_transmute)]
6771 let y: &u8 = transmute_ref!(&mut x);
6772 assert_eq!(*y, 0);
6773 }
6774
6775 #[test]
6776 fn test_transmute_mut() {
6777 // Test that memory is transmuted as expected.
6778 let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6779 let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6780 let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s);
6781 assert_eq!(*x, array_of_arrays);
6782 let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
6783 assert_eq!(*x, array_of_u8s);
6784
6785 {
6786 // Test that it's legal to transmute a reference while shrinking the
6787 // lifetime.
6788 let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
6789 assert_eq!(*x, array_of_u8s);
6790 }
6791 // Test that `transmute_mut!` supports decreasing alignment.
6792 let mut u = AU64(0);
6793 let array = [0, 0, 0, 0, 0, 0, 0, 0];
6794 let x: &[u8; 8] = transmute_mut!(&mut u);
6795 assert_eq!(*x, array);
6796
6797 // Test that a mutable reference can be turned into an immutable one.
6798 let mut x = 0u8;
6799 #[allow(clippy::useless_transmute)]
6800 let y: &u8 = transmute_mut!(&mut x);
6801 assert_eq!(*y, 0);
6802 }
6803
6804 #[test]
6805 fn test_macros_evaluate_args_once() {
6806 let mut ctr = 0;
6807 let _: usize = transmute!({
6808 ctr += 1;
6809 0usize
6810 });
6811 assert_eq!(ctr, 1);
6812
6813 let mut ctr = 0;
6814 let _: &usize = transmute_ref!({
6815 ctr += 1;
6816 &0usize
6817 });
6818 assert_eq!(ctr, 1);
6819 }
6820
6821 #[test]
6822 fn test_include_value() {
6823 const AS_U32: u32 = include_value!("../testdata/include_value/data");
6824 assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
6825 const AS_I32: i32 = include_value!("../testdata/include_value/data");
6826 assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
6827 }
6828
6829 #[test]
6830 fn test_address() {
6831 // Test that the `Deref` and `DerefMut` implementations return a
6832 // reference which points to the right region of memory.
6833
6834 let buf = [0];
6835 let r = Ref::<_, u8>::new(&buf[..]).unwrap();
6836 let buf_ptr = buf.as_ptr();
6837 let deref_ptr: *const u8 = r.deref();
6838 assert_eq!(buf_ptr, deref_ptr);
6839
6840 let buf = [0];
6841 let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
6842 let buf_ptr = buf.as_ptr();
6843 let deref_ptr = r.deref().as_ptr();
6844 assert_eq!(buf_ptr, deref_ptr);
6845 }
6846
6847 // Verify that values written to a `Ref` are properly shared between the
6848 // typed and untyped representations, that reads via `deref` and `read`
6849 // behave the same, and that writes via `deref_mut` and `write` behave the
6850 // same.
6851 fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
// Assert that the value starts out zeroed.
6853 assert_eq!(*r, AU64(0));
6854 assert_eq!(r.read(), AU64(0));
6855
6856 // Assert that values written to the typed value are reflected in the
6857 // byte slice.
6858 const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
6859 *r = VAL1;
6860 assert_eq!(r.bytes(), &VAL1.to_bytes());
6861 *r = AU64(0);
6862 r.write(VAL1);
6863 assert_eq!(r.bytes(), &VAL1.to_bytes());
6864
6865 // Assert that values written to the byte slice are reflected in the
6866 // typed value.
6867 const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
6868 r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
6869 assert_eq!(*r, VAL2);
6870 assert_eq!(r.read(), VAL2);
6871 }
6872
6873 // Verify that values written to a `Ref` are properly shared between the
6874 // typed and untyped representations; pass a value with `typed_len` `AU64`s
6875 // backed by an array of `typed_len * 8` bytes.
6876 fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
6877 // Assert that the value starts out zeroed.
6878 assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
6879
6880 // Check the backing storage is the exact same slice.
6881 let untyped_len = typed_len * 8;
6882 assert_eq!(r.bytes().len(), untyped_len);
6883 assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
6884
6885 // Assert that values written to the typed value are reflected in the
6886 // byte slice.
6887 const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
6888 for typed in &mut *r {
6889 *typed = VAL1;
6890 }
6891 assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
6892
6893 // Assert that values written to the byte slice are reflected in the
6894 // typed value.
6895 const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1
6896 r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
6897 assert!(r.iter().copied().all(|x| x == VAL2));
6898 }
6899
6900 // Verify that values written to a `Ref` are properly shared between the
6901 // typed and untyped representations, that reads via `deref` and `read`
6902 // behave the same, and that writes via `deref_mut` and `write` behave the
6903 // same.
6904 fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
// Assert that the value starts out zeroed.
6906 assert_eq!(*r, [0; 8]);
6907 assert_eq!(r.read(), [0; 8]);
6908
6909 // Assert that values written to the typed value are reflected in the
6910 // byte slice.
6911 const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
6912 *r = VAL1;
6913 assert_eq!(r.bytes(), &VAL1);
6914 *r = [0; 8];
6915 r.write(VAL1);
6916 assert_eq!(r.bytes(), &VAL1);
6917
6918 // Assert that values written to the byte slice are reflected in the
6919 // typed value.
6920 const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
6921 r.bytes_mut().copy_from_slice(&VAL2[..]);
6922 assert_eq!(*r, VAL2);
6923 assert_eq!(r.read(), VAL2);
6924 }
6925
6926 // Verify that values written to a `Ref` are properly shared between the
6927 // typed and untyped representations; pass a value with `len` `u8`s backed
6928 // by an array of `len` bytes.
6929 fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
6930 // Assert that the value starts out zeroed.
6931 assert_eq!(&*r, vec![0u8; len].as_slice());
6932
6933 // Check the backing storage is the exact same slice.
6934 assert_eq!(r.bytes().len(), len);
6935 assert_eq!(r.bytes().as_ptr(), r.as_ptr());
6936
6937 // Assert that values written to the typed value are reflected in the
6938 // byte slice.
6939 let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
6940 r.copy_from_slice(&expected_bytes);
6941 assert_eq!(r.bytes(), expected_bytes.as_slice());
6942
6943 // Assert that values written to the byte slice are reflected in the
6944 // typed value.
6945 for byte in &mut expected_bytes {
*byte = !*byte; // different from the previous contents
6947 }
6948 r.bytes_mut().copy_from_slice(&expected_bytes);
6949 assert_eq!(&*r, expected_bytes.as_slice());
6950 }
6951
6952 #[test]
6953 fn test_new_aligned_sized() {
6954 // Test that a properly-aligned, properly-sized buffer works for new,
6955 // new_from_prefix, and new_from_suffix, and that new_from_prefix and
6956 // new_from_suffix return empty slices. Test that a properly-aligned
6957 // buffer whose length is a multiple of the element size works for
6958 // new_slice. Test that xxx_zeroed behaves the same, and zeroes the
6959 // memory.
6960
6961 // A buffer with an alignment of 8.
6962 let mut buf = Align::<[u8; 8], AU64>::default();
6963 // `buf.t` should be aligned to 8, so this should always succeed.
6964 test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
6965 let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap();
6966 buf.t = ascending;
6967 test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
6968 {
6969 // In a block so that `r` and `suffix` don't live too long.
6970 buf.set_default();
6971 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
6972 assert!(suffix.is_empty());
6973 test_new_helper(r);
6974 }
6975 {
6976 buf.t = ascending;
6977 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
6978 assert!(suffix.is_empty());
6979 test_new_helper(r);
6980 }
6981 {
6982 buf.set_default();
6983 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
6984 assert!(prefix.is_empty());
6985 test_new_helper(r);
6986 }
6987 {
6988 buf.t = ascending;
6989 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
6990 assert!(prefix.is_empty());
6991 test_new_helper(r);
6992 }
6993
6994 // A buffer with alignment 8 and length 24. We choose this length very
6995 // intentionally: if we instead used length 16, then the prefix and
6996 // suffix lengths would be identical. In the past, we used length 16,
6997 // which resulted in this test failing to discover the bug uncovered in
6998 // #506.
6999 let mut buf = Align::<[u8; 24], AU64>::default();
7000 // `buf.t` should be aligned to 8 and have a length which is a multiple
7001 // of `size_of::<AU64>()`, so this should always succeed.
7002 test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3);
7003 let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
7004 // 16 ascending bytes followed by 8 zeros.
7005 let mut ascending_prefix = ascending;
7006 ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7007 // 8 zeros followed by 16 ascending bytes.
7008 let mut ascending_suffix = ascending;
7009 ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7010 test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3);
7011
7012 {
7013 buf.t = ascending_suffix;
7014 let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
7015 assert_eq!(suffix, &ascending[8..]);
7016 test_new_helper_slice(r, 1);
7017 }
7018 {
7019 buf.t = ascending_suffix;
7020 let (r, suffix) =
7021 Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
7022 assert_eq!(suffix, &ascending[8..]);
7023 test_new_helper_slice(r, 1);
7024 }
7025 {
7026 buf.t = ascending_prefix;
7027 let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
7028 assert_eq!(prefix, &ascending[..16]);
7029 test_new_helper_slice(r, 1);
7030 }
7031 {
7032 buf.t = ascending_prefix;
7033 let (prefix, r) =
7034 Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
7035 assert_eq!(prefix, &ascending[..16]);
7036 test_new_helper_slice(r, 1);
7037 }
7038 }
7039
7040 #[test]
7041 fn test_new_unaligned_sized() {
// Test that an unaligned, properly-sized buffer works for
// `new_unaligned`, `new_unaligned_from_prefix`, and
// `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
// and `new_unaligned_from_suffix` return empty slices. Test that an
// unaligned buffer whose length is a multiple of the element size
// works for `new_slice_unaligned`. Test that `xxx_zeroed` behaves
// the same, and zeroes the memory.
7049
7050 let mut buf = [0u8; 8];
7051 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
7052 buf = [0xFFu8; 8];
7053 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
7054 {
7055 // In a block so that `r` and `suffix` don't live too long.
7056 buf = [0u8; 8];
7057 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7058 assert!(suffix.is_empty());
7059 test_new_helper_unaligned(r);
7060 }
7061 {
7062 buf = [0xFFu8; 8];
7063 let (r, suffix) =
7064 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7065 assert!(suffix.is_empty());
7066 test_new_helper_unaligned(r);
7067 }
7068 {
7069 buf = [0u8; 8];
7070 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7071 assert!(prefix.is_empty());
7072 test_new_helper_unaligned(r);
7073 }
7074 {
7075 buf = [0xFFu8; 8];
7076 let (prefix, r) =
7077 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7078 assert!(prefix.is_empty());
7079 test_new_helper_unaligned(r);
7080 }
7081
7082 let mut buf = [0u8; 16];
// `buf` is being used as a `[u8]` slice; `[u8]` has no alignment
// requirement and an element size of 1, so any length is valid and
// this should always succeed.
7085 test_new_helper_slice_unaligned(
7086 Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
7087 16,
7088 );
7089 buf = [0xFFu8; 16];
7090 test_new_helper_slice_unaligned(
7091 Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
7092 16,
7093 );
7094
7095 {
7096 buf = [0u8; 16];
7097 let (r, suffix) =
7098 Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
7099 assert_eq!(suffix, [0; 8]);
7100 test_new_helper_slice_unaligned(r, 8);
7101 }
7102 {
7103 buf = [0xFFu8; 16];
7104 let (r, suffix) =
7105 Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
7106 assert_eq!(suffix, [0xFF; 8]);
7107 test_new_helper_slice_unaligned(r, 8);
7108 }
7109 {
7110 buf = [0u8; 16];
7111 let (prefix, r) =
7112 Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
7113 assert_eq!(prefix, [0; 8]);
7114 test_new_helper_slice_unaligned(r, 8);
7115 }
7116 {
7117 buf = [0xFFu8; 16];
7118 let (prefix, r) =
7119 Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
7120 assert_eq!(prefix, [0xFF; 8]);
7121 test_new_helper_slice_unaligned(r, 8);
7122 }
7123 }
7124
7125 #[test]
7126 fn test_new_oversized() {
7127 // Test that a properly-aligned, overly-sized buffer works for
7128 // `new_from_prefix` and `new_from_suffix`, and that they return the
7129 // remainder and prefix of the slice respectively. Test that
7130 // `xxx_zeroed` behaves the same, and zeroes the memory.
7131
7132 let mut buf = Align::<[u8; 16], AU64>::default();
7133 {
7134 // In a block so that `r` and `suffix` don't live too long. `buf.t`
7135 // should be aligned to 8, so this should always succeed.
7136 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
7137 assert_eq!(suffix.len(), 8);
7138 test_new_helper(r);
7139 }
7140 {
7141 buf.t = [0xFFu8; 16];
7142 // `buf.t` should be aligned to 8, so this should always succeed.
7143 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
7144 // Assert that the suffix wasn't zeroed.
7145 assert_eq!(suffix, &[0xFFu8; 8]);
7146 test_new_helper(r);
7147 }
7148 {
7149 buf.set_default();
7150 // `buf.t` should be aligned to 8, so this should always succeed.
7151 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
7152 assert_eq!(prefix.len(), 8);
7153 test_new_helper(r);
7154 }
7155 {
7156 buf.t = [0xFFu8; 16];
7157 // `buf.t` should be aligned to 8, so this should always succeed.
7158 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
7159 // Assert that the prefix wasn't zeroed.
7160 assert_eq!(prefix, &[0xFFu8; 8]);
7161 test_new_helper(r);
7162 }
7163 }
7164
7165 #[test]
7166 fn test_new_unaligned_oversized() {
// Test that an unaligned, overly-sized buffer works for
7168 // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
7169 // they return the remainder and prefix of the slice respectively. Test
7170 // that `xxx_zeroed` behaves the same, and zeroes the memory.
7171
7172 let mut buf = [0u8; 16];
7173 {
7174 // In a block so that `r` and `suffix` don't live too long.
7175 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7176 assert_eq!(suffix.len(), 8);
7177 test_new_helper_unaligned(r);
7178 }
7179 {
7180 buf = [0xFFu8; 16];
7181 let (r, suffix) =
7182 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7183 // Assert that the suffix wasn't zeroed.
7184 assert_eq!(suffix, &[0xFF; 8]);
7185 test_new_helper_unaligned(r);
7186 }
7187 {
7188 buf = [0u8; 16];
7189 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7190 assert_eq!(prefix.len(), 8);
7191 test_new_helper_unaligned(r);
7192 }
7193 {
7194 buf = [0xFFu8; 16];
7195 let (prefix, r) =
7196 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7197 // Assert that the prefix wasn't zeroed.
7198 assert_eq!(prefix, &[0xFF; 8]);
7199 test_new_helper_unaligned(r);
7200 }
7201 }
7202
7203 #[test]
7204 fn test_ref_from_mut_from() {
// Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success
// cases. Exhaustive coverage is provided by the `Ref` tests above,
// to which these helper methods defer.
7208
7209 let mut buf =
7210 Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
7211
7212 assert_eq!(
7213 AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
7214 [8, 9, 10, 11, 12, 13, 14, 15]
7215 );
7216 let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
7217 suffix.0 = 0x0101010101010101;
// The `[u8; 9]` is deliberately not half the length of the full
// buffer; an exact half would fail to catch `from_prefix` having the
// same implementation as `from_suffix` (issues #506, #511).
7220 assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]);
7221 let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
7222 suffix.0 = 0x0202020202020202;
7223 <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42;
7224 assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]);
7225 <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30;
7226 assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
7227 }
7228
7229 #[test]
7230 fn test_ref_from_mut_from_error() {
7231 // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
7232
7233 // Fail because the buffer is too large.
7234 let mut buf = Align::<[u8; 16], AU64>::default();
7235 // `buf.t` should be aligned to 8, so only the length check should fail.
7236 assert!(AU64::ref_from(&buf.t[..]).is_none());
7237 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7238 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7239 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7240
7241 // Fail because the buffer is too small.
7242 let mut buf = Align::<[u8; 4], AU64>::default();
7243 assert!(AU64::ref_from(&buf.t[..]).is_none());
7244 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7245 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7246 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7247 assert!(AU64::ref_from_prefix(&buf.t[..]).is_none());
7248 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none());
7249 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7250 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7251 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none());
7252 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none());
7253 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none());
7254 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none());
7255
7256 // Fail because the alignment is insufficient.
7257 let mut buf = Align::<[u8; 13], AU64>::default();
7258 assert!(AU64::ref_from(&buf.t[1..]).is_none());
7259 assert!(AU64::mut_from(&mut buf.t[1..]).is_none());
7262 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none());
7263 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none());
7264 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7265 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7266 }
7267
7268 #[test]
7269 #[allow(clippy::cognitive_complexity)]
7270 fn test_new_error() {
7271 // Fail because the buffer is too large.
7272
7273 // A buffer with an alignment of 8.
7274 let mut buf = Align::<[u8; 16], AU64>::default();
7275 // `buf.t` should be aligned to 8, so only the length check should fail.
7276 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7277 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7278 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7279 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7280
7281 // Fail because the buffer is too small.
7282
7283 // A buffer with an alignment of 8.
7284 let mut buf = Align::<[u8; 4], AU64>::default();
7285 // `buf.t` should be aligned to 8, so only the length check should fail.
7286 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7287 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7288 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7289 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7290 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
7291 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
7292 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7293 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7294 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
7295 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
7296 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
7297 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
7298
7299 // Fail because the length is not a multiple of the element size.
7300
7301 let mut buf = Align::<[u8; 12], AU64>::default();
7302 // `buf.t` has length 12, but element size is 8.
7303 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
7304 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
7305 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
7306 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
7307
7308 // Fail because the buffer is too short.
7309 let mut buf = Align::<[u8; 12], AU64>::default();
7310 // `buf.t` has length 12, but the element size is 8 (and we're expecting
7311 // two of them).
7312 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
7313 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
7314 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
7315 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
7316 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
7317 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
7318 .is_none());
7319 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
7320 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
7321 .is_none());
7322
7323 // Fail because the alignment is insufficient.
7324
7325 // A buffer with an alignment of 8. An odd buffer size is chosen so that
7326 // the last byte of the buffer has odd alignment.
7327 let mut buf = Align::<[u8; 13], AU64>::default();
7328 // Slicing from 1, we get a buffer with size 12 (so the length check
7329 // should succeed) but an alignment of only 1, which is insufficient.
7330 assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
7331 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
7332 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
7333 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
7334 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
7335 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
7336 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
7337 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
7338 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
7339 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
7340 // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use
7341 // the suffix of the slice, which has odd alignment.
7342 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7343 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7344
7345 // Fail due to arithmetic overflow.
7346
7347 let mut buf = Align::<[u8; 16], AU64>::default();
7348 let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
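        // Note: `unreasonable_len * size_of::<AU64>()` overflows `usize`
        // (`unreasonable_len` is one more than the largest element count that
        // fits), so these calls must detect the overflow and fail rather than
        // wrapping around.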
7349 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
7350 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
7351 .is_none());
7352 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
7353 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
7354 .is_none());
7355 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
7356 .is_none());
7357 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
7358 &mut buf.t[..],
7359 unreasonable_len
7360 )
7361 .is_none());
7362 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
7363 .is_none());
7364 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
7365 &mut buf.t[..],
7366 unreasonable_len
7367 )
7368 .is_none());
7369 }
7370
    // Tests ensuring that, if a ZST is passed into a slice-like function, we
    // always panic. Since these tests need to be separate per-function, and
    // they tend to take up a lot of space, we generate them using a macro in
    // a submodule instead. The submodule ensures that we can just re-use the
    // name of the function under test for the name of the test itself.
7376 mod test_zst_panics {
7377 macro_rules! zst_test {
7378 ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
7379 #[test]
7380 #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
7381 fn $name() {
7382 let mut buffer = [0u8];
7383 let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
7384 unreachable!("should have panicked, got {:?}", r);
7385 }
7386 }
7387 }
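        // For illustration, `zst_test!(new_slice(), "new_slice")` below
        // expands to roughly the following test (a sketch, not the exact
        // expansion):
        //
        //     #[test]
        //     #[should_panic = "Ref::new_slice called on a zero-sized type"]
        //     fn new_slice() {
        //         let mut buffer = [0u8];
        //         let r = crate::Ref::<_, [()]>::new_slice(&mut buffer[..]);
        //         unreachable!("should have panicked, got {:?}", r);
        //     }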
7388 zst_test!(new_slice(), "new_slice");
7389 zst_test!(new_slice_zeroed(), "new_slice");
7390 zst_test!(new_slice_from_prefix(1), "new_slice");
7391 zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
7392 zst_test!(new_slice_from_suffix(1), "new_slice");
7393 zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
7394 zst_test!(new_slice_unaligned(), "new_slice_unaligned");
7395 zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
7396 zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
7397 zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
7398 zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
7399 zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
7400 }
7401
7402 #[test]
7403 fn test_as_bytes_methods() {
7404 /// Run a series of tests by calling `AsBytes` methods on `t`.
7405 ///
7406 /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
7407 /// before `t` has been modified. `post_mutation` is the expected
7408 /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]`
7409 /// has had its bits flipped (by applying `^= 0xFF`).
7410 ///
7411 /// `N` is the size of `t` in bytes.
7412 fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
7413 t: &mut T,
7414 bytes: &[u8],
7415 post_mutation: &T,
7416 ) {
7417 // Test that we can access the underlying bytes, and that we get the
7418 // right bytes and the right number of bytes.
7419 assert_eq!(t.as_bytes(), bytes);
7420
7421 // Test that changes to the underlying byte slices are reflected in
7422 // the original object.
7423 t.as_bytes_mut()[0] ^= 0xFF;
7424 assert_eq!(t, post_mutation);
7425 t.as_bytes_mut()[0] ^= 0xFF;
7426
7427 // `write_to` rejects slices that are too small or too large.
7428 assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
7429 assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
7430
7431 // `write_to` works as expected.
7432 let mut bytes = [0; N];
7433 assert_eq!(t.write_to(&mut bytes[..]), Some(()));
7434 assert_eq!(bytes, t.as_bytes());
7435
7436 // `write_to_prefix` rejects slices that are too small.
7437 assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
7438
7439 // `write_to_prefix` works with exact-sized slices.
7440 let mut bytes = [0; N];
7441 assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
7442 assert_eq!(bytes, t.as_bytes());
7443
7444 // `write_to_prefix` works with too-large slices, and any bytes past
7445 // the prefix aren't modified.
7446 let mut too_many_bytes = vec![0; N + 1];
7447 too_many_bytes[N] = 123;
7448 assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
7449 assert_eq!(&too_many_bytes[..N], t.as_bytes());
7450 assert_eq!(too_many_bytes[N], 123);
7451
7452 // `write_to_suffix` rejects slices that are too small.
7453 assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
7454
7455 // `write_to_suffix` works with exact-sized slices.
7456 let mut bytes = [0; N];
7457 assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
7458 assert_eq!(bytes, t.as_bytes());
7459
7460 // `write_to_suffix` works with too-large slices, and any bytes
7461 // before the suffix aren't modified.
7462 let mut too_many_bytes = vec![0; N + 1];
7463 too_many_bytes[0] = 123;
7464 assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
7465 assert_eq!(&too_many_bytes[1..], t.as_bytes());
7466 assert_eq!(too_many_bytes[0], 123);
7467 }
7468
7469 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
7470 #[repr(C)]
7471 struct Foo {
7472 a: u32,
7473 b: Wrapping<u32>,
7474 c: Option<NonZeroU32>,
7475 }
7476
7477 let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
7478 vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
7479 } else {
7480 vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
7481 };
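        // Flipping the bits of `t.as_bytes_mut()[0]` flips the first byte in
        // memory of `Foo.a` (originally `1u32`). On little-endian targets,
        // byte 0 is the least-significant byte, so `0x01` becomes `0xFE`,
        // yielding `0x00_00_00_FE`; on big-endian targets, byte 0 is the
        // most-significant byte, so `0x00` becomes `0xFF`, yielding
        // `0xFF_00_00_01`.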
7482 let post_mutation_expected_a =
7483 if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
7484 test::<_, 12>(
7485 &mut Foo { a: 1, b: Wrapping(2), c: None },
7486 expected_bytes.as_bytes(),
7487 &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
7488 );
7489 test::<_, 3>(
7490 Unsized::from_mut_slice(&mut [1, 2, 3]),
7491 &[1, 2, 3],
7492 Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
7493 );
7494 }
7495
7496 #[test]
7497 fn test_array() {
7498 #[derive(FromZeroes, FromBytes, AsBytes)]
7499 #[repr(C)]
7500 struct Foo {
7501 a: [u16; 33],
7502 }
7503
7504 let foo = Foo { a: [0xFFFF; 33] };
7505 let expected = [0xFFu8; 66];
7506 assert_eq!(foo.as_bytes(), &expected[..]);
7507 }
7508
7509 #[test]
7510 fn test_display_debug() {
7511 let buf = Align::<[u8; 8], u64>::default();
7512 let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
7513 assert_eq!(format!("{}", r), "0");
7514 assert_eq!(format!("{:?}", r), "Ref(0)");
7515
7516 let buf = Align::<[u8; 8], u64>::default();
7517 let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
7518 assert_eq!(format!("{:?}", r), "Ref([0])");
7519 }
7520
7521 #[test]
7522 fn test_eq() {
7523 let buf1 = 0_u64;
7524 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7525 let buf2 = 0_u64;
7526 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7527 assert_eq!(r1, r2);
7528 }
7529
7530 #[test]
7531 fn test_ne() {
7532 let buf1 = 0_u64;
7533 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7534 let buf2 = 1_u64;
7535 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7536 assert_ne!(r1, r2);
7537 }
7538
7539 #[test]
7540 fn test_ord() {
7541 let buf1 = 0_u64;
7542 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7543 let buf2 = 1_u64;
7544 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7545 assert!(r1 < r2);
7546 }
7547
7548 #[test]
7549 fn test_new_zeroed() {
7550 assert!(!bool::new_zeroed());
7551 assert_eq!(u64::new_zeroed(), 0);
7552 // This test exists in order to exercise unsafe code, especially when
7553 // running under Miri.
7554 #[allow(clippy::unit_cmp)]
7555 {
7556 assert_eq!(<()>::new_zeroed(), ());
7557 }
7558 }
7559
7560 #[test]
7561 fn test_transparent_packed_generic_struct() {
7562 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7563 #[repr(transparent)]
7564 struct Foo<T> {
7565 _t: T,
7566 _phantom: PhantomData<()>,
7567 }
7568
7569 assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
7570 assert_impl_all!(Foo<u8>: Unaligned);
7571
7572 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7573 #[repr(packed)]
7574 struct Bar<T, U> {
7575 _t: T,
7576 _u: U,
7577 }
7578
7579 assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
7580 }
7581
7582 #[test]
7583 fn test_impls() {
7584 use core::borrow::Borrow;
7585
7586 // A type that can supply test cases for testing
7587 // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!`
7588 // must implement this trait; that macro uses it to generate runtime
7589 // tests for `TryFromBytes` impls.
7590 //
7591 // All `T: FromBytes` types are provided with a blanket impl. Other
        // types must implement `TryFromBytesTestable` directly (i.e., using
        // `impl_try_from_bytes_testable!`).
7594 trait TryFromBytesTestable {
7595 fn with_passing_test_cases<F: Fn(&Self)>(f: F);
7596 fn with_failing_test_cases<F: Fn(&[u8])>(f: F);
7597 }
7598
7599 impl<T: FromBytes> TryFromBytesTestable for T {
7600 fn with_passing_test_cases<F: Fn(&Self)>(f: F) {
7601 // Test with a zeroed value.
7602 f(&Self::new_zeroed());
7603
7604 let ffs = {
7605 let mut t = Self::new_zeroed();
7606 let ptr: *mut T = &mut t;
                    // SAFETY: `T: FromBytes`, so any byte pattern (including
                    // all `0xFF`s) is a valid `T`. `ptr` is derived from a
                    // live `&mut T`, so it is valid for writes of
                    // `size_of::<T>()` bytes.
7608 unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
7609 t
7610 };
7611
7612 // Test with a value initialized with 0xFF.
7613 f(&ffs);
7614 }
7615
7616 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {}
7617 }
7618
7619 // Implements `TryFromBytesTestable`.
7620 macro_rules! impl_try_from_bytes_testable {
7621 // Base case for recursion (when the list of types has run out).
7622 (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
7623 // Implements for type(s) with no type parameters.
7624 ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7625 impl TryFromBytesTestable for $ty {
7626 impl_try_from_bytes_testable!(
7627 @methods @success $($success_case),*
7628 $(, @failure $($failure_case),*)?
7629 );
7630 }
7631 impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
7632 };
7633 // Implements for multiple types with no type parameters.
7634 ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
7635 $(
7636 impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
7637 )*
7638 };
7639 // Implements only the methods; caller must invoke this from inside
7640 // an impl block.
7641 (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7642 fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
7643 $(
7644 _f($success_case.borrow());
7645 )*
7646 }
7647
7648 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
7649 $($(
7650 // `unused_qualifications` is spuriously triggered on
7651 // `Option::<Self>::None`.
7652 #[allow(unused_qualifications)]
                        let case = $failure_case.as_bytes();
                        _f(case);
7655 )*)?
7656 }
7657 };
7658 }
7659
7660 // Note that these impls are only for types which are not `FromBytes`.
7661 // `FromBytes` types are covered by a preceding blanket impl.
7662 impl_try_from_bytes_testable!(
7663 bool => @success true, false,
7664 @failure 2u8, 3u8, 0xFFu8;
7665 char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
7666 @failure 0xD800u32, 0xDFFFu32, 0x110000u32;
            str => @success "", "hello", "❤️🧡💛💚💙💜",
7668 @failure [0, 159, 146, 150];
7669 NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
7670 NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
7671 NonZeroUsize, NonZeroIsize
7672 => @success Self::new(1).unwrap(),
7673 // Doing this instead of `0` ensures that we always satisfy
7674 // the size and alignment requirements of `Self` (whereas
7675 // `0` may be any integer type with a different size or
7676 // alignment than some `NonZeroXxx` types).
7677 @failure Option::<Self>::None;
7678 );
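        // For illustration, the `bool` arm above generates roughly the
        // following impl (a sketch, not the exact expansion):
        //
        //     impl TryFromBytesTestable for bool {
        //         fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
        //             _f(true.borrow());
        //             _f(false.borrow());
        //         }
        //         fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
        //             _f(2u8.as_bytes());
        //             _f(3u8.as_bytes());
        //             _f(0xFFu8.as_bytes());
        //         }
        //     }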
7679
        // Asserts that `$ty` implements every listed `$trait` and doesn't
        // implement any listed `!$trait`. Note that all `$trait`s must come
        // before any `!$trait`s.
7682 //
7683 // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success
7684 // and failure cases for `TryFromBytes::is_bit_valid`.
7685 macro_rules! assert_impls {
7686 ($ty:ty: TryFromBytes) => {
7687 <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| {
7688 let c = Ptr::from(val);
7689 // SAFETY:
                    // - Since `val` is a normal reference, `c` is guaranteed to
7691 // be aligned, to point to a single allocation, and to
7692 // have a size which doesn't overflow `isize`.
7693 // - Since `val` is a valid `$ty`, `c`'s referent satisfies
7694 // the bit validity constraints of `is_bit_valid`, which
7695 // are a superset of the bit validity constraints of
7696 // `$ty`.
7697 let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) };
7698 assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val);
7699
7700 // TODO(#5): In addition to testing `is_bit_valid`, test the
7701 // methods built on top of it. This would both allow us to
7702 // test their implementations and actually convert the bytes
7703 // to `$ty`, giving Miri a chance to catch if this is
                    // unsound (i.e., if our `is_bit_valid` impl is buggy).
7705 //
7706 // The following code was tried, but it doesn't work because
7707 // a) some types are not `AsBytes` and, b) some types are
7708 // not `Sized`.
7709 //
7710 // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap();
7711 // assert_eq!(r, &val);
7712 // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap();
7713 // assert_eq!(r, &mut val);
7714 // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap();
7715 // assert_eq!(v, val);
7716 });
7717 #[allow(clippy::as_conversions)]
7718 <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
7719 let res = <$ty as TryFromBytes>::try_from_ref(c);
7720 assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c);
7721 });
7722
7723 #[allow(dead_code)]
7724 const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
7725 };
7726 ($ty:ty: $trait:ident) => {
7727 #[allow(dead_code)]
7728 const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
7729 };
7730 ($ty:ty: !$trait:ident) => {
7731 #[allow(dead_code)]
7732 const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
7733 };
7734 ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
7735 $(
7736 assert_impls!($ty: $trait);
7737 )*
7738
7739 $(
7740 assert_impls!($ty: !$negative_trait);
7741 )*
7742 };
7743 }
7744
7745 // NOTE: The negative impl assertions here are not necessarily
7746 // prescriptive. They merely serve as change detectors to make sure
7747 // we're aware of what trait impls are getting added with a given
7748 // change. Of course, some impls would be invalid (e.g., `bool:
7749 // FromBytes`), and so this change detection is very important.
7750
7751 assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7752 assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7753 assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7754 assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7755 assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7756 assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7757 assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7758 assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7759 assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7760 assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7761 assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7762 assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7763 assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7764 assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7765 assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7766
7767 assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7768 assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned);
7769 assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7770
7771 assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7772 assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7773 assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7774 assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7775 assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7776 assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7777 assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7778 assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7779 assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7780 assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7781 assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7782 assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7783
7784 assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7785 assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7786 assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7787 assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7788 assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7789 assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7790 assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7791 assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7792 assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7793 assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7794 assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7795 assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7796
7797 // Implements none of the ZC traits.
7798 struct NotZerocopy;
7799
7800 #[rustfmt::skip]
7801 type FnManyArgs = fn(
7802 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7803 ) -> (NotZerocopy, NotZerocopy);
7804
7805 // Allowed, because we're not actually using this type for FFI.
7806 #[allow(improper_ctypes_definitions)]
7807 #[rustfmt::skip]
7808 type ECFnManyArgs = extern "C" fn(
7809 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7810 ) -> (NotZerocopy, NotZerocopy);
7811
7812 #[cfg(feature = "alloc")]
7813 assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7814 assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7815 assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7816 assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7817 assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7818 assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7819 assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7820 assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7821 assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7822 assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7823 assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7824 assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7825
7826 assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7827 assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7828
7829 assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7830 assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7831 assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7832 assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7833
7834 assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes);
7835 assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7836
7837 assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7838 assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7839
7840 assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7841 assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes);
7842
7843 assert_impls!([u8]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7844 assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7845 assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7846 assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7847 assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7848 assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7849
7850 assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7851 assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7852 assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7853 assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7854 assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7855 assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7856
7857 #[cfg(feature = "simd")]
7858 {
7859 #[allow(unused_macros)]
7860 macro_rules! test_simd_arch_mod {
7861 ($arch:ident, $($typ:ident),*) => {
7862 {
7863 use core::arch::$arch::{$($typ),*};
7864 use crate::*;
7865 $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )*
7866 }
7867 };
7868 }
7869 #[cfg(target_arch = "x86")]
7870 test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7871
7872 #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
7873 test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
7874
7875 #[cfg(target_arch = "x86_64")]
7876 test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7877
7878 #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
7879 test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
7880
7881 #[cfg(target_arch = "wasm32")]
7882 test_simd_arch_mod!(wasm32, v128);
7883
7884 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
7885 test_simd_arch_mod!(
7886 powerpc,
7887 vector_bool_long,
7888 vector_double,
7889 vector_signed_long,
7890 vector_unsigned_long
7891 );
7892
7893 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
7894 test_simd_arch_mod!(
7895 powerpc64,
7896 vector_bool_long,
7897 vector_double,
7898 vector_signed_long,
7899 vector_unsigned_long
7900 );
7901 #[cfg(target_arch = "aarch64")]
7902 #[rustfmt::skip]
7903 test_simd_arch_mod!(
7904 aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
7905 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
7906 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
7907 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
7908 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
7909 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
7910 uint64x1_t, uint64x2_t
7911 );
7912 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
7913 #[rustfmt::skip]
7914 test_simd_arch_mod!(arm, int8x4_t, uint8x4_t);
7915 }
7916 }
7917}
7918
7919#[cfg(kani)]
7920mod proofs {
7921 use super::*;
7922
7923 impl kani::Arbitrary for DstLayout {
7924 fn any() -> Self {
7925 let align: NonZeroUsize = kani::any();
7926 let size_info: SizeInfo = kani::any();
7927
7928 kani::assume(align.is_power_of_two());
7929 kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);
7930
            // For testing purposes, we care most about instantiations of
7932 // `DstLayout` that can correspond to actual Rust types. We use
7933 // `Layout` to verify that our `DstLayout` satisfies the validity
7934 // conditions of Rust layouts.
7935 kani::assume(
7936 match size_info {
7937 SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()),
7938 SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                        // `SliceDst` cannot encode an exact size, but we know
                        // it is at least `_offset` bytes.
7941 Layout::from_size_align(_offset, align.get())
7942 }
7943 }
7944 .is_ok(),
7945 );
7946
            Self { align, size_info }
7948 }
7949 }
7950
7951 impl kani::Arbitrary for SizeInfo {
7952 fn any() -> Self {
7953 let is_sized: bool = kani::any();
7954
7955 match is_sized {
7956 true => {
7957 let size: usize = kani::any();
7958
7959 kani::assume(size <= isize::MAX as _);
7960
7961 SizeInfo::Sized { _size: size }
7962 }
7963 false => SizeInfo::SliceDst(kani::any()),
7964 }
7965 }
7966 }
7967
7968 impl kani::Arbitrary for TrailingSliceLayout {
7969 fn any() -> Self {
7970 let elem_size: usize = kani::any();
7971 let offset: usize = kani::any();
7972
7973 kani::assume(elem_size < isize::MAX as _);
7974 kani::assume(offset < isize::MAX as _);
7975
7976 TrailingSliceLayout { _elem_size: elem_size, _offset: offset }
7977 }
7978 }
7979
7980 #[kani::proof]
7981 fn prove_dst_layout_extend() {
7982 use crate::util::{core_layout::padding_needed_for, max, min};
7983
7984 let base: DstLayout = kani::any();
7985 let field: DstLayout = kani::any();
7986 let packed: Option<NonZeroUsize> = kani::any();
7987
7988 if let Some(max_align) = packed {
7989 kani::assume(max_align.is_power_of_two());
7990 kani::assume(base.align <= max_align);
7991 }
7992
7993 // The base can only be extended if it's sized.
7994 kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
7995 let base_size = if let SizeInfo::Sized { _size: size } = base.size_info {
7996 size
7997 } else {
7998 unreachable!();
7999 };
8000
8001 // Under the above conditions, `DstLayout::extend` will not panic.
8002 let composite = base.extend(field, packed);
8003
8004 // The field's alignment is clamped by `max_align` (i.e., the
8005 // `packed` attribute, if any) [1].
8006 //
8007 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8008 //
8009 // The alignments of each field, for the purpose of positioning
8010 // fields, is the smaller of the specified alignment and the
8011 // alignment of the field's type.
8012 let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));
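        // For example (hypothetical values): under `packed = Some(2)`, a
        // field whose natural alignment is 8 is positioned using alignment
        // `min(8, 2) == 2`.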
8013
8014 // The struct's alignment is the maximum of its previous alignment and
8015 // `field_align`.
8016 assert_eq!(composite.align, max(base.align, field_align));
8017
        // Compute the minimum amount of inter-field padding needed to
        // satisfy the field's alignment, and the offset of the trailing
        // field. [1]
8021 //
8022 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8023 //
8024 // Inter-field padding is guaranteed to be the minimum required in
8025 // order to satisfy each field's (possibly altered) alignment.
8026 let padding = padding_needed_for(base_size, field_align);
8027 let offset = base_size + padding;
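        // For example (hypothetical values): with `base_size == 5` and
        // `field_align == 4`, the minimum padding is 3, and the trailing
        // field lands at `offset == 8`.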
8028
8029 // For testing purposes, we'll also construct `alloc::Layout`
8030 // stand-ins for `DstLayout`, and show that `extend` behaves
8031 // comparably on both types.
8032 let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();
8033
8034 match field.size_info {
8035 SizeInfo::Sized { _size: field_size } => {
8036 if let SizeInfo::Sized { _size: composite_size } = composite.size_info {
8037 // If the trailing field is sized, the resulting layout
                    // will be sized. Its size will be the sum of the size of
                    // the preceding layout, the size of the new field, and
                    // the size of the inter-field padding between the two.
8041 assert_eq!(composite_size, offset + field_size);
8042
8043 let field_analog =
8044 Layout::from_size_align(field_size, field_align.get()).unwrap();
8045
8046 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8047 {
8048 assert_eq!(actual_offset, offset);
8049 assert_eq!(actual_composite.size(), composite_size);
8050 assert_eq!(actual_composite.align(), composite.align.get());
8051 } else {
                        // An error here reflects that the composite of `base`
8053 // and `field` cannot correspond to a real Rust type
8054 // fragment, because such a fragment would violate
8055 // the basic invariants of a valid Rust layout. At
8056 // the time of writing, `DstLayout` is a little more
8057 // permissive than `Layout`, so we don't assert
8058 // anything in this branch (e.g., unreachability).
8059 }
8060 } else {
8061 panic!("The composite of two sized layouts must be sized.")
8062 }
8063 }
8064 SizeInfo::SliceDst(TrailingSliceLayout {
8065 _offset: field_offset,
8066 _elem_size: field_elem_size,
8067 }) => {
8068 if let SizeInfo::SliceDst(TrailingSliceLayout {
8069 _offset: composite_offset,
8070 _elem_size: composite_elem_size,
8071 }) = composite.size_info
8072 {
8073 // The offset of the trailing slice component is the sum
8074 // of the offset of the trailing field and the trailing
8075 // slice offset within that field.
8076 assert_eq!(composite_offset, offset + field_offset);
8077 // The elem size is unchanged.
8078 assert_eq!(composite_elem_size, field_elem_size);
8079
8080 let field_analog =
8081 Layout::from_size_align(field_offset, field_align.get()).unwrap();
8082
8083 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8084 {
8085 assert_eq!(actual_offset, offset);
8086 assert_eq!(actual_composite.size(), composite_offset);
8087 assert_eq!(actual_composite.align(), composite.align.get());
8088 } else {
                        // An error here reflects that the composite of `base`
8090 // and `field` cannot correspond to a real Rust type
8091 // fragment, because such a fragment would violate
8092 // the basic invariants of a valid Rust layout. At
8093 // the time of writing, `DstLayout` is a little more
8094 // permissive than `Layout`, so we don't assert
8095 // anything in this branch (e.g., unreachability).
8096 }
8097 } else {
8098 panic!("The extension of a layout with a DST must result in a DST.")
8099 }
8100 }
8101 }
8102 }
8103
8104 #[kani::proof]
8105 #[kani::should_panic]
8106 fn prove_dst_layout_extend_dst_panics() {
8107 let base: DstLayout = kani::any();
8108 let field: DstLayout = kani::any();
8109 let packed: Option<NonZeroUsize> = kani::any();
8110
8111 if let Some(max_align) = packed {
8112 kani::assume(max_align.is_power_of_two());
8113 kani::assume(base.align <= max_align);
8114 }
8115
8116 kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));
8117
8118 let _ = base.extend(field, packed);
8119 }
8120
8121 #[kani::proof]
8122 fn prove_dst_layout_pad_to_align() {
8123 use crate::util::core_layout::padding_needed_for;
8124
8125 let layout: DstLayout = kani::any();
8126
8127 let padded: DstLayout = layout.pad_to_align();
8128
8129 // Calling `pad_to_align` does not alter the `DstLayout`'s alignment.
8130 assert_eq!(padded.align, layout.align);
8131
8132 if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info {
8133 if let SizeInfo::Sized { _size: padded_size } = padded.size_info {
                // If the layout is sized, it will remain sized after padding
                // is added. Its padded size will be the sum of its unpadded
                // size and the trailing padding needed to satisfy its
                // alignment requirement.
8138 let padding = padding_needed_for(unpadded_size, layout.align);
8139 assert_eq!(padded_size, unpadded_size + padding);
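                // For example (hypothetical values): a sized layout with
                // size 5 and alignment 4 pads to size 8, since 3 bytes of
                // trailing padding are needed to reach the next multiple of
                // the alignment.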
8140
8141 // Prove that calling `DstLayout::pad_to_align` behaves
8142 // identically to `Layout::pad_to_align`.
8143 let layout_analog =
8144 Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
8145 let padded_analog = layout_analog.pad_to_align();
8146 assert_eq!(padded_analog.align(), layout.align.get());
8147 assert_eq!(padded_analog.size(), padded_size);
8148 } else {
8149 panic!("The padding of a sized layout must result in a sized layout.")
8150 }
8151 } else {
8152 // If the layout is a DST, padding cannot be statically added.
8153 assert_eq!(padded.size_info, layout.size_info);
8154 }
8155 }
8156}
8157