// Copyright 2018 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

// After updating the following doc comment, make sure to run the following
// command to update `README.md` based on its contents:
//
//   ./generate-readme.sh > README.md

//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
//! Fill out our [user survey][user-survey]!</span>*
//!
//! ***<span style="font-size: 140%">Fast, safe, <span
//! style="color:red;">compile error</span>. Pick two.</span>***
//!
//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
//! so you don't have to.
//!
//! # Overview
//!
//! Zerocopy provides four core marker traits, each of which can be derived
//! (e.g., `#[derive(FromZeroes)]`):
//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
//!   instance of a type
//! - [`FromBytes`] indicates that a type may safely be converted from an
//!   arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
//!   sequence
//! - [`Unaligned`] indicates that a type's alignment requirement is 1
//!
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
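//!
//! For example, a type which derives these traits can round-trip through raw
//! bytes without copying. This is a minimal sketch (assuming the `derive`
//! feature is enabled; the `PacketHeader` type here is invented for
//! illustration):
//!
//! ```
//! # use zerocopy::{AsBytes, FromBytes};
//! # use zerocopy_derive::*;
//! #[derive(FromZeroes, FromBytes, AsBytes)]
//! #[repr(C)]
//! struct PacketHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//! }
//!
//! // Parse a header directly from raw bytes...
//! let bytes = [0u8, 80, 1, 187];
//! let header = PacketHeader::read_from(&bytes[..]).unwrap();
//! assert_eq!(header.dst_port, [1, 187]);
//!
//! // ...and view it as bytes again, all without copying the data.
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```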
//!
//! Zerocopy also provides byte-order aware integer types that support these
//! conversions; see the [`byteorder`] module. These types are especially useful
//! for network parsing.
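//!
//! As a brief sketch (with the default `byteorder` feature enabled; the value
//! here is arbitrary), a [`U16`] stores its value as big- or little-endian
//! bytes and has alignment 1, so it can be read at any byte offset:
//!
//! ```
//! # use zerocopy::byteorder::{BigEndian, U16};
//! let port = U16::<BigEndian>::new(443);
//! assert_eq!(port.get(), 443);
//! ```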
//!
//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
//!
//! # Cargo Features
//!
//! - **`alloc`**
//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
//!   the `alloc` crate is added as a dependency, and some allocation-related
//!   functionality is added.
//!
//! - **`byteorder`** (enabled by default)
//!   Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
//!   The `byteorder` module provides byte order-aware equivalents of the
//!   multi-byte primitive numerical types. Unlike their primitive equivalents,
//!   the types in this module have no alignment requirement and support byte
//!   order conversions. This can be useful in handling file formats, network
//!   packet layouts, etc., which don't provide alignment guarantees and which
//!   may use a byte order different from that of the execution platform.
//!
//! - **`derive`**
//!   Provides derives for the core marker traits via the `zerocopy-derive`
//!   crate. These derives are re-exported from `zerocopy`, so it is not
//!   necessary to depend on `zerocopy-derive` directly.
//!
//!   However, you may experience better compile times if you instead directly
//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
//!   since doing so will allow Rust to compile these crates in parallel. To do
//!   so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
//!
//!   ```toml
//!   [dependencies]
//!   zerocopy = "0.X"
//!   zerocopy-derive = "0.X"
//!   ```
//!
//! - **`simd`**
//!   When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
//!   `AsBytes` impls are emitted for all stable SIMD types which exist on the
//!   target platform. Note that the layout of SIMD types is not yet stabilized,
//!   so these impls may be removed in the future if layout changes make them
//!   invalid. For more information, see the Unsafe Code Guidelines Reference
//!   page on the [layout of packed SIMD vectors][simd-layout].
//!
//! - **`simd-nightly`**
//!   Enables the `simd` feature and adds support for SIMD types which are only
//!   available on nightly. Since these types are unstable, support for any type
//!   may be removed at any point in the future.
//!
//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
//!
//! # Security Ethos
//!
//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current memory
//! model, and *any future memory model*. We ensure this by:
//! - **...not 'guessing' about Rust's semantics.**
//!   We annotate `unsafe` code with a precise rationale for its soundness that
//!   cites a relevant section of Rust's official documentation. When Rust's
//!   documented semantics are unclear, we work with the Rust Operational
//!   Semantics Team to clarify Rust's documentation.
//! - **...rigorously testing our implementation.**
//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
//!   array of supported target platforms of varying endianness and pointer
//!   width, and across both current and experimental memory models of Rust.
//! - **...formally proving the correctness of our implementation.**
//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
//!   correctness.
//!
//! For more information, see our full [soundness policy].
//!
//! [Miri]: https://github.com/rust-lang/miri
//! [kani]: https://github.com/model-checking/kani
//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
//!
//! # Relationship to Project Safe Transmute
//!
//! [Project Safe Transmute] is an official initiative of the Rust Project to
//! develop language-level support for safer transmutation. The Project consults
//! with crates like zerocopy to identify aspects of safer transmutation that
//! would benefit from compiler support, and has developed an [experimental,
//! compiler-supported analysis][mcp-transmutability] which determines whether,
//! for a given type, any value of that type may be soundly transmuted into
//! another type. Once this functionality is sufficiently mature, zerocopy
//! intends to replace its internal transmutability analysis (implemented by our
//! custom derives) with the compiler-supported one. This change will likely be
//! an implementation detail that is invisible to zerocopy's users.
//!
//! Project Safe Transmute will not replace the need for most of zerocopy's
//! higher-level abstractions. The experimental compiler analysis is a tool for
//! checking the soundness of `unsafe` code, not a tool to avoid writing
//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
//! will still be required in order to provide higher-level abstractions on top
//! of the building block provided by Project Safe Transmute.
//!
//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
//!
//! # MSRV
//!
//! See our [MSRV policy].
//!
//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
//!
//! # Changelog
//!
//! Zerocopy uses [GitHub Releases].
//!
//! [GitHub Releases]: https://github.com/google/zerocopy/releases
// Sometimes we want to use lints which were added after our MSRV.
// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
// this attribute, any unknown lint would cause a CI failure when testing with
// our MSRV.
#![allow(unknown_lints)]
#![deny(renamed_and_removed_lints)]
#![deny(
    anonymous_parameters,
    deprecated_in_future,
    illegal_floating_point_literal_pattern,
    late_bound_lifetime_arguments,
    missing_copy_implementations,
    missing_debug_implementations,
    missing_docs,
    path_statements,
    patterns_in_fns_without_body,
    rust_2018_idioms,
    trivial_numeric_casts,
    unreachable_pub,
    unsafe_op_in_unsafe_fn,
    unused_extern_crates,
    unused_qualifications,
    variant_size_differences
)]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    deny(fuzzy_provenance_casts, lossy_provenance_casts)
)]
#![deny(
    clippy::all,
    clippy::alloc_instead_of_core,
    clippy::arithmetic_side_effects,
    clippy::as_underscore,
    clippy::assertions_on_result_states,
    clippy::as_conversions,
    clippy::correctness,
    clippy::dbg_macro,
    clippy::decimal_literal_representation,
    clippy::get_unwrap,
    clippy::indexing_slicing,
    clippy::missing_inline_in_public_items,
    clippy::missing_safety_doc,
    clippy::obfuscated_if_else,
    clippy::perf,
    clippy::print_stdout,
    clippy::std_instead_of_core,
    clippy::style,
    clippy::suspicious,
    clippy::todo,
    clippy::undocumented_unsafe_blocks,
    clippy::unimplemented,
    clippy::unnested_or_patterns,
    clippy::unwrap_used,
    clippy::use_debug
)]
#![deny(
    rustdoc::bare_urls,
    rustdoc::broken_intra_doc_links,
    rustdoc::invalid_codeblock_attributes,
    rustdoc::invalid_html_tags,
    rustdoc::invalid_rust_codeblocks,
    rustdoc::missing_crate_level_docs,
    rustdoc::private_intra_doc_links
)]
// In test code, it makes sense to weight more heavily towards concise, readable
// code over correct or debuggable code.
#![cfg_attr(any(test, kani), allow(
    // In tests, you get line numbers and have access to source code, so panic
    // messages are less important. You also often unwrap a lot, which would
    // make expect'ing instead very verbose.
    clippy::unwrap_used,
    // In tests, there's no harm in "panic risks" - the worst that can happen is
    // that your test will fail, and you'll fix it. By contrast, panic risks in
    // production code introduce the possibility of code panicking unexpectedly
    // "in the field".
    clippy::arithmetic_side_effects,
    clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "simd-nightly", feature(stdsimd))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    feature(layout_for_ptr, strict_provenance)
)]

// This is a hack to allow zerocopy-derive derives to work in this crate. They
// assume that zerocopy is linked as an extern crate, so they access items from
// it as `zerocopy::Xxx`. This `extern crate` declaration makes that work within
// this crate as well.
#[cfg(any(feature = "derive", test))]
extern crate self as zerocopy;

#[macro_use]
mod macros;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub mod byteorder;
#[doc(hidden)]
pub mod macro_util;
mod post_monomorphization_compile_fail_tests;
mod util;
// TODO(#252): If we make this pub, come up with a better name.
mod wrappers;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub use crate::byteorder::*;
pub use crate::wrappers::*;

#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;

// `pub use` separately here so that we can mark it `#[doc(hidden)]`.
//
// TODO(#29): Remove this or add a doc comment.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[doc(hidden)]
pub use zerocopy_derive::KnownLayout;

use core::{
    cell::{self, RefMut},
    cmp::Ordering,
    fmt::{self, Debug, Display, Formatter},
    hash::Hasher,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::{
        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
    },
    ops::{Deref, DerefMut},
    ptr::{self, NonNull},
    slice,
};

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::{boxed::Box, vec::Vec};

#[cfg(any(feature = "alloc", kani))]
use core::alloc::Layout;

// Used by `TryFromBytes::is_bit_valid`.
#[doc(hidden)]
pub use crate::util::ptr::Ptr;

// For each polyfill, as soon as the corresponding feature is stable, the
// polyfill import will be unused because method/function resolution will prefer
// the inherent method/function over a trait method/function. Thus, we suppress
// the `unused_imports` warning.
//
// See the documentation on `util::polyfills` for more information.
#[allow(unused_imports)]
use crate::util::polyfills::NonNullExt as _;

#[rustversion::nightly]
#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
const _: () = {
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};

/// The target pointer width, counted in bits.
const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;

/// The layout of a type which might be dynamically-sized.
///
/// `DstLayout` describes the layout of sized types, slice types, and "slice
/// DSTs" - i.e., those that are known by the type system to have a trailing
/// slice (as distinguished from `dyn Trait` types - such types *might* have a
/// trailing slice, but the type system isn't aware of it).
///
/// # Safety
///
/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
/// Rust types - i.e., those that satisfy the layout requirements outlined by
/// [the reference]. Callers may assume that an instance of `DstLayout`
/// satisfies any conditions imposed on Rust types by the reference.
///
/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
/// - `layout.align` is equal to `T`'s alignment
/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
///   `size_of::<T>() == size`
/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
///   - `T` is a slice DST
///   - The `size` of an instance of `T` with `elems` trailing slice elements is
///     equal to `slice_layout.offset + slice_layout.elem_size * elems` rounded
///     up to the nearest multiple of `layout.align`. Any bytes in the range
///     `[slice_layout.offset + slice_layout.elem_size * elems, size)` are
///     padding and must not be assumed to be initialized.
///
/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
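///
/// As a worked illustration of the size formula above (not an additional
/// guarantee): for `#[repr(C)] struct Foo { a: u16, b: u8, c: [u8] }`, the
/// alignment is 2, the trailing slice offset is 3, and the element size is 1.
/// With `elems = 4`, the size is `3 + 1 * 4 = 7` rounded up to the nearest
/// multiple of 2, i.e., 8, and byte 7 is padding.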
#[doc(hidden)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
pub struct DstLayout {
    align: NonZeroUsize,
    size_info: SizeInfo,
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
enum SizeInfo<E = usize> {
    Sized { _size: usize },
    SliceDst(TrailingSliceLayout<E>),
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
struct TrailingSliceLayout<E = usize> {
    // The offset of the first byte of the trailing slice field. Note that this
    // is NOT the same as the minimum size of the type. For example, consider
    // the following type:
    //
    //   struct Foo {
    //       a: u16,
    //       b: u8,
    //       c: [u8],
    //   }
    //
    // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
    // by a padding byte.
    _offset: usize,
    // The size of the element type of the trailing slice field.
    _elem_size: E,
}

impl SizeInfo {
    /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
    /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
    #[allow(unused)]
    const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
        Some(match *self {
            SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
            SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
                } else {
                    return None;
                }
            }
        })
    }
}

#[doc(hidden)]
#[derive(Copy, Clone)]
#[cfg_attr(test, derive(Debug))]
#[allow(missing_debug_implementations)]
pub enum _CastType {
    _Prefix,
    _Suffix,
}

impl DstLayout {
    /// The minimum possible alignment of a type.
    const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
        Some(min_align) => min_align,
        None => unreachable!(),
    };

    /// The maximum theoretically possible alignment of a type.
    ///
    /// For compatibility with future Rust versions, this is defined as the
    /// maximum power-of-two that fits into a `usize`. See also
    /// [`DstLayout::CURRENT_MAX_ALIGN`].
    const THEORETICAL_MAX_ALIGN: NonZeroUsize =
        match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
            Some(max_align) => max_align,
            None => unreachable!(),
        };

    /// The current, documented max alignment of a type \[1\].
    ///
    /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
    ///
    ///   The alignment value must be a power of two from 1 up to
    ///   2<sup>29</sup>.
    #[cfg(not(kani))]
    const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
        Some(max_align) => max_align,
        None => unreachable!(),
    };

    /// Constructs a `DstLayout` for a zero-sized type with alignment
    /// `repr_align` (or 1, if `repr_align` is `None`). If `repr_align` is
    /// provided, then it must be a power of two.
    ///
    /// # Panics
    ///
    /// This function panics if the supplied `repr_align` is not a power of two.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that the contract of this function is satisfied.
    #[doc(hidden)]
    #[inline]
    pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
        let align = match repr_align {
            Some(align) => align,
            None => Self::MIN_ALIGN,
        };

        assert!(align.is_power_of_two());

        DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
    }

    /// Constructs a `DstLayout` which describes `T`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
    #[doc(hidden)]
    #[inline]
    pub const fn for_type<T>() -> DstLayout {
        // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
        // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
        // `size` field is also correct by construction.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
        }
    }

    /// Constructs a `DstLayout` which describes `[T]`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
    const fn for_slice<T>() -> DstLayout {
        // SAFETY: The alignment of a slice is equal to the alignment of its
        // element type, and so `align` is initialized correctly.
        //
        // Since this is just a slice type, there is no offset between the
        // beginning of the type and the beginning of the slice, so it is
        // correct to set `offset: 0`. The `elem_size` is correct by
        // construction. Since `[T]` is a (degenerate case of a) slice DST, it
        // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                _offset: 0,
                _elem_size: mem::size_of::<T>(),
            }),
        }
    }

    /// Like `Layout::extend`, this creates a layout describing a record whose
    /// layout consists of `self` followed by `field`, including the necessary
    /// inter-field padding, but not any trailing padding.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked for each field in declaration order. To add trailing
    /// padding, call `DstLayout::pad_to_align` after extending the layout for
    /// all fields. If `self` corresponds to a type marked with
    /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
    /// otherwise `None`.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
    /// fields whose layout is described by `self`, and those fields are
    /// immediately followed by a field whose layout is `field`, then unsafe
    /// code may rely on `self.extend(field, repr_packed)` producing a layout
    /// that correctly encompasses those two components.
    ///
    /// We make no guarantees about the behavior of this method if these
    /// fragments cannot appear in a valid Rust type (e.g., the concatenation of
    /// the layouts would lead to a size larger than `isize::MAX`).
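    ///
    /// As an illustrative sketch (following from the guarantees above, not an
    /// additional one), matching the layout of `#[repr(C)] struct S { a: u8,
    /// b: u16 }` on a target where `u16` has alignment 2 proceeds as:
    ///
    /// ```text
    /// new_zst(None)                        // size 0, align 1
    ///     .extend(for_type::<u8>(), None)  // size 1, align 1
    ///     .extend(for_type::<u16>(), None) // 1 padding byte; size 4, align 2
    ///     .pad_to_align()                  // no trailing padding needed; size 4
    /// ```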
    #[doc(hidden)]
    #[inline]
    pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
        use util::{core_layout::padding_needed_for, max, min};

        // If `repr_packed` is `None`, there are no alignment constraints, and
        // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
        let max_align = match repr_packed {
            Some(max_align) => max_align,
            None => Self::THEORETICAL_MAX_ALIGN,
        };

        assert!(max_align.is_power_of_two());

        // We use Kani to prove that this method is robust to future increases
        // in Rust's maximum allowed alignment. However, if such a change ever
        // actually occurs, we'd like to be notified via assertion failures.
        #[cfg(not(kani))]
        {
            debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            if let Some(repr_packed) = repr_packed {
                debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            }
        }

        // The field's alignment is clamped by `repr_packed` (i.e., the
        // `repr(packed(N))` attribute, if any) [1].
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   The alignments of each field, for the purpose of positioning
        //   fields, is the smaller of the specified alignment and the alignment
        //   of the field's type.
        let field_align = min(field.align, max_align);

        // The struct's alignment is the maximum of its previous alignment and
        // `field_align`.
        let align = max(self.align, field_align);

        let size_info = match self.size_info {
            // If the layout is already a DST, we panic; DSTs cannot be extended
            // with additional fields.
            SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),

            SizeInfo::Sized { _size: preceding_size } => {
                // Compute the minimum amount of inter-field padding needed to
                // satisfy the field's alignment, and the offset of the trailing
                // field [1].
                //
                // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
                //
                //   Inter-field padding is guaranteed to be the minimum
                //   required in order to satisfy each field's (possibly
                //   altered) alignment.
                let padding = padding_needed_for(preceding_size, field_align);

                // This will not panic (and is proven to not panic, with Kani)
                // if the layout components can correspond to a leading layout
                // fragment of a valid Rust type, but may panic otherwise (e.g.,
                // combining or aligning the components would create a size
                // exceeding `isize::MAX`).
                let offset = match preceding_size.checked_add(padding) {
                    Some(offset) => offset,
                    None => panic!("Adding padding to `self`'s size overflows `usize`."),
                };

                match field.size_info {
                    SizeInfo::Sized { _size: field_size } => {
                        // If the trailing field is sized, the resulting layout
                        // will be sized. Its size will be the sum of the
                        // preceding layout, the size of the new field, and the
                        // size of inter-field padding between the two.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let size = match offset.checked_add(field_size) {
                            Some(size) => size,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::Sized { _size: size }
                    }
                    SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: trailing_offset,
                        _elem_size,
                    }) => {
                        // If the trailing field is dynamically sized, so too
                        // will be the resulting layout. The offset of the
                        // trailing slice component is the sum of the offset of
                        // the trailing field and the trailing slice offset
                        // within that field.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but may
                        // panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let offset = match offset.checked_add(trailing_offset) {
                            Some(offset) => offset,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
                    }
                }
            }
        };

        DstLayout { align, size_info }
    }

    /// Like `Layout::pad_to_align`, this routine rounds the size of this
    /// layout up to the nearest multiple of this type's alignment. This method
    /// leaves DST layouts unchanged, since the trailing padding of DSTs is
    /// computed at runtime.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked after the invocations of [`DstLayout::extend`]. Note
    /// that `repr(packed(N))` is accounted for during `extend`, which clamps
    /// the layout's alignment, so no separate `repr_packed` argument is needed
    /// here.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` type begins with fields
    /// whose layout is described by `self`, followed only by zero or more bytes
    /// of trailing padding (not included in `self`), then unsafe code may rely
    /// on `self.pad_to_align()` producing a layout that correctly encapsulates
    /// the layout of that type.
    ///
    /// We make no guarantees about the behavior of this method if `self` cannot
    /// appear in a valid Rust type (e.g., because the addition of trailing
    /// padding would lead to a size larger than `isize::MAX`).
    #[doc(hidden)]
    #[inline]
    pub const fn pad_to_align(self) -> Self {
        use util::core_layout::padding_needed_for;

        let size_info = match self.size_info {
            // For sized layouts, we add the minimum amount of trailing padding
            // needed to satisfy alignment.
            SizeInfo::Sized { _size: unpadded_size } => {
                let padding = padding_needed_for(unpadded_size, self.align);
                let size = match unpadded_size.checked_add(padding) {
                    Some(size) => size,
                    None => panic!("Adding padding caused size to overflow `usize`."),
                };
                SizeInfo::Sized { _size: size }
            }
            // For DST layouts, trailing padding depends on the length of the
            // trailing DST and is computed at runtime. This does not alter the
            // offset or element size of the layout, so we leave `size_info`
            // unchanged.
            size_info @ SizeInfo::SliceDst(_) => size_info,
        };

        DstLayout { align: self.align, size_info }
    }

    /// Validates that a cast is sound from a layout perspective.
    ///
    /// Validates that the size and alignment requirements of a type with the
    /// layout described in `self` would not be violated by performing a
    /// `cast_type` cast from a pointer with address `addr` which refers to a
    /// memory region of size `bytes_len`.
    ///
    /// If the cast is valid, `validate_cast_and_convert_metadata` returns
    /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
    /// `elems` is the maximum number of trailing slice elements for which a
    /// cast would be valid (for sized types, `elems` is meaningless and should
    /// be ignored). `split_at` is the index at which to split the memory region
    /// in order for the prefix (suffix) to contain the result of the cast, and
    /// in order for the remaining suffix (prefix) to contain the leftover
    /// bytes.
    ///
    /// There are three conditions under which a cast can fail:
    /// - The smallest possible value for the type is larger than the provided
    ///   memory region
    /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
    ///   alignment requirement
    /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
    ///   `self`'s alignment requirement (as a consequence, since the size of
    ///   every instance of the type is a multiple of its alignment, no size for
    ///   the type will result in a starting address which is properly aligned)
    ///
    /// # Safety
    ///
    /// The caller may assume that this implementation is correct, and may rely
    /// on that assumption for the soundness of their code. In particular, the
    /// caller may assume that, if `validate_cast_and_convert_metadata` returns
    /// `Some((elems, split_at))`, then:
    /// - A pointer to the type (for dynamically sized types, this includes
    ///   `elems` as its pointer metadata) describes an object of size `size <=
    ///   bytes_len`
    /// - If this is a prefix cast:
    ///   - `addr` satisfies `self`'s alignment
    ///   - `size == split_at`
    /// - If this is a suffix cast:
    ///   - `split_at == bytes_len - size`
    ///   - `addr + split_at` satisfies `self`'s alignment
    ///
    /// Note that this method does *not* ensure that a pointer constructed from
    /// its return values will be a valid pointer. In particular, this method
    /// does not reason about `isize` overflow, which is a requirement of many
    /// Rust pointer APIs, and may at some point be determined to be a validity
    /// invariant of pointer types themselves. This should never be a problem so
    /// long as the arguments to this method are derived from a known-valid
    /// pointer (e.g., one derived from a safe Rust reference), but it is
    /// nonetheless the caller's responsibility to justify that pointer
    /// arithmetic will not overflow based on a safety argument *other than* the
    /// mere fact that this method returned successfully.
    ///
    /// # Panics
    ///
    /// `validate_cast_and_convert_metadata` will panic if `self` describes a
    /// DST whose trailing slice element is zero-sized.
    ///
    /// If `addr + bytes_len` overflows `usize`,
    /// `validate_cast_and_convert_metadata` may panic, or it may return
    /// incorrect results. No guarantees are made about when
    /// `validate_cast_and_convert_metadata` will panic. The caller should not
    /// rely on `validate_cast_and_convert_metadata` panicking in any particular
    /// condition, even if `debug_assertions` are enabled.
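    ///
    /// # Examples
    ///
    /// An illustrative sketch (following from the guarantees above, not an
    /// additional one): consider a prefix cast to `[u16]` (alignment 2,
    /// trailing slice offset 0, element size 2) from a region with `addr = 0`
    /// and `bytes_len = 5`. `addr` satisfies the alignment, and at most two
    /// whole elements fit, so the method returns `Some((2, 4))`: the prefix
    /// `[0, 4)` holds the `[u16]`, and the suffix `[4, 5)` holds the one
    /// leftover byte.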
    #[allow(unused)]
    const fn validate_cast_and_convert_metadata(
        &self,
        addr: usize,
        bytes_len: usize,
        cast_type: _CastType,
    ) -> Option<(usize, usize)> {
        // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
        macro_rules! __debug_assert {
            ($e:expr $(, $msg:expr)?) => {
                debug_assert!({
                    #[allow(clippy::arithmetic_side_effects)]
                    let e = $e;
                    e
                } $(, $msg)?);
            };
        }

        // Note that, in practice, `self` is always a compile-time constant. We
        // do this check earlier than needed to ensure that we always panic as a
        // result of bugs in the program (such as calling this function on an
        // invalid type) instead of allowing this panic to be hidden if the cast
        // would have failed anyway for runtime reasons (such as a too-small
        // memory region).
        //
        // TODO(#67): Once our MSRV is 1.65, use let-else:
        // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
        let size_info = match self.size_info.try_to_nonzero_elem_size() {
            Some(size_info) => size_info,
            None => panic!("attempted to cast to slice type with zero-sized element"),
        };

        // Precondition
        __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");

        // Alignment checks go in their own block to avoid introducing variables
        // into the top-level scope.
        {
            // We check alignment for `addr` (for prefix casts) or `addr +
            // bytes_len` (for suffix casts). For a prefix cast, the correctness
            // of this check is trivial - `addr` is the address the object will
            // live at.
            //
            // For a suffix cast, we know that all valid sizes for the type are
            // a multiple of the alignment (and by safety precondition, we know
            // `DstLayout` may only describe valid Rust types). Thus, a
            // validly-sized instance which lives at a validly-aligned address
            // must also end at a validly-aligned address. Thus, if the end
            // address for a suffix cast (`addr + bytes_len`) is not aligned,
            // then no valid start address will be aligned either.
            let offset = match cast_type {
                _CastType::_Prefix => 0,
                _CastType::_Suffix => bytes_len,
            };

            // Addition is guaranteed not to overflow because `offset <=
            // bytes_len`, and `addr + bytes_len <= usize::MAX` is a
            // precondition of this method. Modulus is guaranteed not to divide
            // by 0 because `align` is non-zero.
            #[allow(clippy::arithmetic_side_effects)]
            if (addr + offset) % self.align.get() != 0 {
                return None;
            }
        }

        let (elems, self_bytes) = match size_info {
            SizeInfo::Sized { _size: size } => {
                if size > bytes_len {
                    return None;
                }
                (0, size)
            }
            SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
                // Calculate the maximum number of bytes that could be consumed
                // - any number of bytes larger than this will either not be a
                // multiple of the alignment, or will be larger than
                // `bytes_len`.
                let max_total_bytes =
                    util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
                // Calculate the maximum number of bytes that could be consumed
                // by the trailing slice.
                //
                // TODO(#67): Once our MSRV is 1.65, use let-else:
                // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
                let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
                    Some(max) => max,
                    // `bytes_len` too small even for 0 trailing slice elements.
                    None => return None,
                };

                // Calculate the number of elements that fit in
                // `max_slice_and_padding_bytes`; any remaining bytes will be
                // considered padding.
                //
                // Guaranteed not to divide by zero: `elem_size` is non-zero.
                #[allow(clippy::arithmetic_side_effects)]
                let elems = max_slice_and_padding_bytes / elem_size.get();
                // Guaranteed not to overflow on multiplication: `usize::MAX >=
                // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes /
                // elem_size) * elem_size`.
                //
                // Guaranteed not to overflow on addition:
                // - max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size + offset <= max_total_bytes <= usize::MAX
                #[allow(clippy::arithmetic_side_effects)]
                let without_padding = offset + elems * elem_size.get();
                // `self_bytes` is equal to the offset bytes plus the bytes
                // consumed by the trailing slice plus any padding bytes
                // required to satisfy the alignment. Note that we have computed
                // the maximum number of trailing slice elements that could fit
                // in `self_bytes`, so any padding is guaranteed to be less than
                // the size of an extra element.
                //
                // Guaranteed not to overflow:
                // - By previous comment: without_padding == elems * elem_size +
                //   offset <= max_total_bytes
                // - By construction, `max_total_bytes` is a multiple of
                //   `self.align`.
                // - At most, adding padding needed to round `without_padding`
                //   up to the next multiple of the alignment will bring
                //   `self_bytes` up to `max_total_bytes`.
                #[allow(clippy::arithmetic_side_effects)]
                let self_bytes = without_padding
                    + util::core_layout::padding_needed_for(without_padding, self.align);
                (elems, self_bytes)
            }
        };

        __debug_assert!(self_bytes <= bytes_len);

        let split_at = match cast_type {
            _CastType::_Prefix => self_bytes,
            // Guaranteed not to underflow:
            // - In the `Sized` branch, we only return `size` if `size <=
            //   bytes_len`.
            // - In the `SliceDst` branch, we calculate `self_bytes <=
            //   max_total_bytes`, which is upper-bounded by `bytes_len`.
            #[allow(clippy::arithmetic_side_effects)]
            _CastType::_Suffix => bytes_len - self_bytes,
        };

        Some((elems, split_at))
    }
}

/// A trait which carries information about a type's layout that is used by the
/// internals of this crate.
///
/// This trait is not meant for consumption by code outside of this crate. While
/// the normal semver stability guarantees apply with respect to which types
/// implement this trait and which trait implementations are implied by this
/// trait, no semver stability guarantees are made regarding its internals; they
/// may change at any time, and code which makes use of them may break.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
pub unsafe trait KnownLayout {
    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
    // it likely won't be in the future, but there's no reason not to be
    // forwards-compatible with object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    #[doc(hidden)]
    const LAYOUT: DstLayout;

    /// SAFETY: The returned pointer has the same address and provenance as
    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
    /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
}

// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T: KnownLayout> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // TODO(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }
}

#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => MaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);

safety_comment! {
    /// SAFETY:
    /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
    /// `[u8]` and `[T]` respectively. `str` has different bit validity than
    /// `[u8]`, but that doesn't affect the soundness of this impl.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
    unsafe_impl_known_layout!(#[repr([u8])] str);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
}

/// Analyzes whether a type is [`FromZeroes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #     Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// union MyUnion {
/// #     variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeroes#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeroes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeroes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeroes` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeroes`.
/// - If the type is an enum, it must be C-like (meaning that all variants have
///   no fields) and it must have a variant with a discriminant of `0`. See [the
///   reference] for a description of how discriminant values are chosen.
/// - The type must not contain any [`UnsafeCell`]s (this is required in order
///   for it to be sound to construct a `&[u8]` and a `&T` to the same region of
///   memory). The type may contain references or pointers to `UnsafeCell`s so
///   long as those values can themselves be initialized from zeroes
///   (`FromZeroes` is not currently implemented for, e.g.,
///   `Option<&UnsafeCell<_>>`, but it could be one day).
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
/// [`UnsafeCell`]: core::cell::UnsafeCell
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeroes` if:
/// 1. its padding is soundly `FromZeroes`, and
/// 2. its fields are soundly `FromZeroes`.
///
/// The first condition is always satisfied: padding bytes do not have any
/// validity constraints. A [discussion] of this topic in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
/// its fields are `FromZeroes`.
// TODO(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeroes;

/// Types whose validity can be checked at runtime, allowing them to be
/// conditionally converted from byte slices.
///
/// WARNING: Do not implement this trait yourself! Instead, use
/// `#[derive(TryFromBytes)]`.
///
/// `TryFromBytes` types can safely be deserialized from an untrusted sequence
/// of bytes by performing a runtime check that the byte sequence contains a
/// valid instance of `Self`.
///
/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see
/// the [`byteorder`] module.
///
/// # What is a "valid instance"?
///
/// In Rust, each type has *bit validity*, which refers to the set of bit
/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
/// invalid value, this is considered [undefined behavior].
///
/// Rust's bit validity rules are currently being decided, which means that some
/// types have three classes of bit patterns: those which are definitely valid,
/// and whose validity is documented in the language; those which may or may not
/// be considered valid at some point in the future; and those which are
/// definitely invalid.
///
/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
/// language.
///
/// For most use cases, Rust's current guarantees align with programmers'
/// intuitions about what ought to be valid. As a result, zerocopy's
/// conservatism should not affect most users. One notable exception is unions,
/// whose bit validity is very up in the air; zerocopy does not permit
/// implementing `TryFromBytes` for any union type.
///
/// If you are negatively affected by lack of support for a particular type,
/// we encourage you to let us know by [filing an issue][github-repo].
///
/// # Safety
///
/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
/// or representation of `T`. It merely provides the ability to perform a
/// validity check at runtime via methods like [`try_from_ref`].
///
/// Currently, it is not possible to stably implement `TryFromBytes` other than
/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
/// on this trait that provide well-defined safety invariants, no stability
/// guarantees are made with respect to these items. In particular, future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
/// [github-repo]: https://github.com/google/zerocopy
/// [`try_from_ref`]: TryFromBytes::try_from_ref
// TODO(#5): Update `try_from_ref` doc link once it exists
#[doc(hidden)]
pub unsafe trait TryFromBytes {
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// ## Preconditions
    ///
    /// The memory referenced by `candidate` may only be accessed via reads for
    /// the duration of this method call. This prohibits writes through mutable
    /// references and through [`UnsafeCell`]s. There may exist immutable
    /// references to the same memory which contain `UnsafeCell`s so long as:
    /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
    ///   `Self`. This is a bidirectional property: `Self` may not contain
    ///   `UnsafeCell`s where other references to the same memory do not, and
    ///   vice-versa.
    /// - Those `UnsafeCell`s are never used to perform mutation for the
    ///   duration of this method call.
    ///
    /// The memory referenced by `candidate` may not be referenced by any
    /// mutable references even if these references are not used to perform
    /// mutation.
    ///
    /// `candidate` is not required to refer to a valid `Self`. However, it must
    /// satisfy the requirement that uninitialized bytes may only be present
    /// where it is possible for them to be present in `Self`. This is a dynamic
    /// property: if, at a particular byte offset, a valid enum discriminant is
    /// set, the subsequent bytes may only have uninitialized bytes as
    /// specified by the corresponding enum.
    ///
    /// Formally, given `len = size_of_val_raw(candidate)`, at every byte
    /// offset, `b`, in the range `[0, len)`:
    /// - If, in all instances `s: Self` of length `len`, the byte at offset `b`
    ///   in `s` is initialized, then the byte at offset `b` within `*candidate`
    ///   must be initialized.
    /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`.
    ///   Let `S` be the subset of valid instances of `Self` of length `len`
    ///   which contain `c` in the offset range `[0, b)`. If, for all instances
    ///   of `s: Self` in `S`, the byte at offset `b` in `s` is initialized,
    ///   then the byte at offset `b` in `*candidate` must be initialized.
    ///
    /// Pragmatically, this means that if `*candidate` is guaranteed to
    /// contain an enum type at a particular offset, and the enum discriminant
    /// stored in `*candidate` corresponds to a valid variant of that enum
    /// type, then it is guaranteed that the appropriate bytes of `*candidate`
    /// are initialized as defined by that variant's bit validity (although
    /// note that the variant may contain another enum type, in which case the
    /// same rules apply depending on the state of its discriminant, and so on
    /// recursively).
    ///
    /// ## Postconditions
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    #[doc(hidden)]
    unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool;

    /// Attempts to interpret a byte slice as a `Self`.
    ///
    /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that
    /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is
    /// reinterpreted as a `Self`.
    ///
    /// Note that Rust's bit validity rules are still being decided. As such,
    /// there exist types whose bit validity is ambiguous. See the
    /// `TryFromBytes` docs for a discussion of how these cases are handled.
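    ///
    /// # Examples
    ///
    /// A hypothetical sketch (marked `ignore` since this API is unstable and
    /// `#[doc(hidden)]`; the `Op` type here is invented for illustration):
    ///
    /// ```ignore
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum Op {
    ///     Load = 0,
    ///     Store = 1,
    /// }
    ///
    /// // `1` is a valid discriminant, so validation succeeds...
    /// assert!(Op::try_from_ref(&[1][..]).is_some());
    /// // ...while `2` is not, so it fails.
    /// assert!(Op::try_from_ref(&[2][..]).is_none());
    /// ```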
    // TODO(#251): In a future in which we distinguish between `FromBytes` and
    // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow
    // interior mutability.
    #[inline]
    #[doc(hidden)] // TODO(#5): Finalize name before removing this attribute.
    fn try_from_ref(bytes: &[u8]) -> Option<&Self>
    where
        Self: KnownLayout,
    {
        let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;

        // SAFETY:
        // - Since `bytes` is an immutable reference, we know that no mutable
        //   references exist to this memory region.
        // - Since `[u8]` contains no `UnsafeCell`s, we know there are no
        //   `&UnsafeCell` references to this memory region.
        // - Since we don't permit implementing `TryFromBytes` for types which
        //   contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so
        //   the requirement that all references contain `UnsafeCell`s at the
        //   same offsets is trivially satisfied.
        // - All bytes of `bytes` are initialized.
        //
        // This call may panic. If that happens, it doesn't cause any soundness
        // issues, as we have not generated any invalid state which we need to
        // fix before returning.
        if unsafe { !Self::is_bit_valid(maybe_self) } {
            return None;
        }

        // SAFETY:
        // - Preconditions for `as_ref`:
        //   - `is_bit_valid` guarantees that `*maybe_self` contains a valid
        //     `Self`. Since `&[u8]` does not permit interior mutation, this
        //     cannot be invalidated after this method returns.
        //   - Since the argument and return types are immutable references,
        //     Rust will prevent the caller from producing any mutable
        //     references to the same memory region.
        //   - Since `Self` is not allowed to contain any `UnsafeCell`s and the
        //     same is true of `[u8]`, interior mutation is not possible. Thus,
        //     no mutation is possible. For the same reason, there is no
        //     mismatch between the two types in terms of which byte ranges are
        //     referenced as `UnsafeCell`s.
        // - Since interior mutation isn't possible within `Self`, there's no
        //   way for the returned reference to be used to modify the byte range,
        //   and thus there's no way for the returned reference to be used to
        //   write an invalid `[u8]` which would be observable via the original
        //   `&[u8]`.
        Some(unsafe { maybe_self.as_ref() })
    }
}

/// Types for which a sequence of bytes all set to zero represents a valid
/// instance of the type.
///
/// Any memory region of the appropriate length which is guaranteed to contain
/// only zero bytes can be viewed as any `FromZeroes` type with no runtime
/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature);
/// e.g.:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #     Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// union MyUnion {
/// #     variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `FromZeroes`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: FromZeroes`, and
/// what unsafe code may assume of such types. If you don't plan on implementing
/// `FromZeroes` manually, and you don't plan on writing unsafe code that
/// operates on `FromZeroes` types, then you don't need to read this section.*
///
/// If `T: FromZeroes`, then unsafe code may assume that:
/// - It is sound to treat any initialized sequence of zero bytes of length
///   `size_of::<T>()` as a `T`.
/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
///   `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
///   construct a `t: &T` at the same address as `b`, and it is sound for both
///   `b` and `t` to be live at the same time.
///
/// If a type which violates this contract is marked as `FromZeroes`, it may
/// cause undefined behavior.
///
/// `#[derive(FromZeroes)]` only permits [types which satisfy these
/// requirements][derive-analysis].
///
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::FromZeroes",
    doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"),
)]
1359pub unsafe trait FromZeroes {
1360 // The `Self: Sized` bound makes it so that `FromZeroes` is still object
1361 // safe.
1362 #[doc(hidden)]
1363 fn only_derive_is_allowed_to_implement_this_trait()
1364 where
1365 Self: Sized;
1366
1367 /// Overwrites `self` with zeroes.
1368 ///
1369 /// Sets every byte in `self` to 0. While this is similar to doing `*self =
1370 /// Self::new_zeroed()`, it differs in that `zero` does not semantically
1371 /// drop the current value and replace it with a new one - it simply
1372 /// modifies the bytes of the existing value.
1373 ///
1374 /// # Examples
1375 ///
1376 /// ```
1377 /// # use zerocopy::FromZeroes;
1378 /// # use zerocopy_derive::*;
1379 /// #
1380 /// #[derive(FromZeroes)]
1381 /// #[repr(C)]
1382 /// struct PacketHeader {
1383 /// src_port: [u8; 2],
1384 /// dst_port: [u8; 2],
1385 /// length: [u8; 2],
1386 /// checksum: [u8; 2],
1387 /// }
1388 ///
1389 /// let mut header = PacketHeader {
1390 /// src_port: 100u16.to_be_bytes(),
1391 /// dst_port: 200u16.to_be_bytes(),
1392 /// length: 300u16.to_be_bytes(),
1393 /// checksum: 400u16.to_be_bytes(),
1394 /// };
1395 ///
1396 /// header.zero();
1397 ///
1398 /// assert_eq!(header.src_port, [0, 0]);
1399 /// assert_eq!(header.dst_port, [0, 0]);
1400 /// assert_eq!(header.length, [0, 0]);
1401 /// assert_eq!(header.checksum, [0, 0]);
1402 /// ```
1403 #[inline(always)]
1404 fn zero(&mut self) {
1405 let slf: *mut Self = self;
1406 let len = mem::size_of_val(self);
1407 // SAFETY:
1408 // - `self` is guaranteed by the type system to be valid for writes of
1409 // size `size_of_val(self)`.
1410 // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
1411 // as required by `u8`.
        // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
        //   instance of `Self`.
1414 //
1415 // TODO(#429): Add references to docs and quotes.
1416 unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
1417 }
1418
1419 /// Creates an instance of `Self` from zeroed bytes.
1420 ///
1421 /// # Examples
1422 ///
1423 /// ```
1424 /// # use zerocopy::FromZeroes;
1425 /// # use zerocopy_derive::*;
1426 /// #
1427 /// #[derive(FromZeroes)]
1428 /// #[repr(C)]
1429 /// struct PacketHeader {
1430 /// src_port: [u8; 2],
1431 /// dst_port: [u8; 2],
1432 /// length: [u8; 2],
1433 /// checksum: [u8; 2],
1434 /// }
1435 ///
1436 /// let header: PacketHeader = FromZeroes::new_zeroed();
1437 ///
1438 /// assert_eq!(header.src_port, [0, 0]);
1439 /// assert_eq!(header.dst_port, [0, 0]);
1440 /// assert_eq!(header.length, [0, 0]);
1441 /// assert_eq!(header.checksum, [0, 0]);
1442 /// ```
1443 #[inline(always)]
1444 fn new_zeroed() -> Self
1445 where
1446 Self: Sized,
1447 {
1448 // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
1449 unsafe { mem::zeroed() }
1450 }
1451
1452 /// Creates a `Box<Self>` from zeroed bytes.
1453 ///
1454 /// This function is useful for allocating large values on the heap and
1455 /// zero-initializing them, without ever creating a temporary instance of
1456 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
1457 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
1458 /// storing `[u8; 1048576]` in a temporary variable on the stack.
1459 ///
1460 /// On systems that use a heap implementation that supports allocating from
1461 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
1462 /// have performance benefits.
1463 ///
    /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
    /// container types, though such conversions may entail a new allocation.
1466 ///
1467 /// # Panics
1468 ///
1469 /// Panics if allocation of `size_of::<Self>()` bytes fails.
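    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the `alloc` Cargo feature):
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// // Allocate a 1 MiB zeroed buffer directly on the heap.
    /// let buf: Box<[u8; 1048576]> = <[u8; 1048576]>::new_box_zeroed();
    /// assert!(buf.iter().all(|&b| b == 0));
    /// ```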
1470 #[cfg(feature = "alloc")]
1471 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1472 #[inline]
1473 fn new_box_zeroed() -> Box<Self>
1474 where
1475 Self: Sized,
1476 {
1477 // If `T` is a ZST, then return a proper boxed instance of it. There is
1478 // no allocation, but `Box` does require a correct dangling pointer.
1479 let layout = Layout::new::<Self>();
1480 if layout.size() == 0 {
1481 return Box::new(Self::new_zeroed());
1482 }
1483
1484 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1485 #[allow(clippy::undocumented_unsafe_blocks)]
1486 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1487 if ptr.is_null() {
1488 alloc::alloc::handle_alloc_error(layout);
1489 }
1490 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1491 #[allow(clippy::undocumented_unsafe_blocks)]
1492 unsafe {
1493 Box::from_raw(ptr)
1494 }
1495 }
1496
1497 /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
1498 ///
1499 /// This function is useful for allocating large values of `[Self]` on the
1500 /// heap and zero-initializing them, without ever creating a temporary
1501 /// instance of `[Self; _]` on the stack. For example,
1502 /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
1503 /// the heap; it does not require storing the slice on the stack.
1504 ///
1505 /// On systems that use a heap implementation that supports allocating from
1506 /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
1507 /// benefits.
1508 ///
1509 /// If `Self` is a zero-sized type, then this function will return a
1510 /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
1511 /// actual information, but its `len()` property will report the correct
1512 /// value.
1513 ///
1514 /// # Panics
1515 ///
1516 /// * Panics if `size_of::<Self>() * len` overflows.
1517 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
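    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the `alloc` Cargo feature):
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// // Allocate a zeroed boxed slice of eight `u32`s on the heap.
    /// let slice: Box<[u32]> = u32::new_box_slice_zeroed(8);
    /// assert_eq!(&*slice, &[0u32; 8][..]);
    /// ```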
1518 #[cfg(feature = "alloc")]
1519 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1520 #[inline]
1521 fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
1522 where
1523 Self: Sized,
1524 {
1525 let size = mem::size_of::<Self>()
1526 .checked_mul(len)
1527 .expect("mem::size_of::<Self>() * len overflows `usize`");
1528 let align = mem::align_of::<Self>();
1529 // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
1530 // bug in which sufficiently-large allocations (those which, when
1531 // rounded up to the alignment, overflow `isize`) are not rejected,
1532 // which can cause undefined behavior. See #64 for details.
1533 //
1534 // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
1535 #[allow(clippy::as_conversions)]
1536 let max_alloc = (isize::MAX as usize).saturating_sub(align);
1537 assert!(size <= max_alloc);
1538 // TODO(https://github.com/rust-lang/rust/issues/55724): Use
1539 // `Layout::repeat` once it's stabilized.
1540 let layout =
1541 Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
1542
1543 let ptr = if layout.size() != 0 {
1544 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1545 #[allow(clippy::undocumented_unsafe_blocks)]
1546 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1547 if ptr.is_null() {
1548 alloc::alloc::handle_alloc_error(layout);
1549 }
1550 ptr
1551 } else {
1552 // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
1553 // is zero, but it does require a non-null dangling pointer for its
1554 // allocation.
1555 NonNull::<Self>::dangling().as_ptr()
1556 };
1557
1558 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1559 #[allow(clippy::undocumented_unsafe_blocks)]
1560 unsafe {
1561 Box::from_raw(slice::from_raw_parts_mut(ptr, len))
1562 }
1563 }
1564
1565 /// Creates a `Vec<Self>` from zeroed bytes.
1566 ///
    /// This function is useful for allocating large `Vec`s and
1568 /// zero-initializing them, without ever creating a temporary instance of
1569 /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
1570 /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
1571 /// heap; it does not require storing intermediate values on the stack.
1572 ///
1573 /// On systems that use a heap implementation that supports allocating from
1574 /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
1575 ///
1576 /// If `Self` is a zero-sized type, then this function will return a
1577 /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
1578 /// actual information, but its `len()` property will report the correct
1579 /// value.
1580 ///
1581 /// # Panics
1582 ///
1583 /// * Panics if `size_of::<Self>() * len` overflows.
1584 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
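    ///
    /// # Examples
    ///
    /// A minimal sketch (requires the `alloc` Cargo feature):
    ///
    /// ```
    /// # use zerocopy::FromZeroes;
    /// // Allocate a zeroed `Vec` of four bytes.
    /// let v: Vec<u8> = u8::new_vec_zeroed(4);
    /// assert_eq!(v, vec![0, 0, 0, 0]);
    /// ```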
1585 #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1587 #[inline(always)]
1588 fn new_vec_zeroed(len: usize) -> Vec<Self>
1589 where
1590 Self: Sized,
1591 {
1592 Self::new_box_slice_zeroed(len).into()
1593 }
1594}
1595
1596/// Analyzes whether a type is [`FromBytes`].
1597///
1598/// This derive analyzes, at compile time, whether the annotated type satisfies
1599/// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is
1600/// sound to do so. This derive can be applied to structs, enums, and unions;
1601/// e.g.:
1602///
1603/// ```
1604/// # use zerocopy_derive::{FromBytes, FromZeroes};
1605/// #[derive(FromZeroes, FromBytes)]
1606/// struct MyStruct {
1607/// # /*
1608/// ...
1609/// # */
1610/// }
1611///
1612/// #[derive(FromZeroes, FromBytes)]
1613/// #[repr(u8)]
1614/// enum MyEnum {
1615/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1616/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1617/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1618/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1619/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1620/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1621/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1622/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1623/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1624/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1625/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1626/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1627/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1628/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1629/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1630/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1631/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1632/// # VFF,
1633/// # /*
1634/// ...
1635/// # */
1636/// }
1637///
1638/// #[derive(FromZeroes, FromBytes)]
1639/// union MyUnion {
1640/// # variant: u8,
1641/// # /*
1642/// ...
1643/// # */
1644/// }
1645/// ```
1646///
1647/// [safety conditions]: trait@FromBytes#safety
1648///
1649/// # Analysis
1650///
1651/// *This section describes, roughly, the analysis performed by this derive to
1652/// determine whether it is sound to implement `FromBytes` for a given type.
1653/// Unless you are modifying the implementation of this derive, or attempting to
1654/// manually implement `FromBytes` for a type yourself, you don't need to read
1655/// this section.*
1656///
1657/// If a type has the following properties, then this derive can implement
1658/// `FromBytes` for that type:
1659///
1660/// - If the type is a struct, all of its fields must be `FromBytes`.
1661/// - If the type is an enum:
1662/// - It must be a C-like enum (meaning that all variants have no fields).
1663/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1664/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1665/// - The maximum number of discriminants must be used (so that every possible
1666/// bit pattern is a valid one). Be very careful when using the `C`,
1667/// `usize`, or `isize` representations, as their size is
1668/// platform-dependent.
1669/// - The type must not contain any [`UnsafeCell`]s (this is required in order
1670/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
1671/// memory). The type may contain references or pointers to `UnsafeCell`s so
1672/// long as those values can themselves be initialized from zeroes
1673/// (`FromBytes` is not currently implemented for, e.g., `Option<*const
1674/// UnsafeCell<_>>`, but it could be one day).
1675///
1676/// [`UnsafeCell`]: core::cell::UnsafeCell
1677///
1678/// This analysis is subject to change. Unsafe code may *only* rely on the
1679/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
1680/// implementation details of this derive.
1681///
1682/// ## Why isn't an explicit representation required for structs?
1683///
1684/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
1685/// that structs are marked with `#[repr(C)]`.
1686///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
1691///
1692/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1693///
1694/// Since the layout of structs only consists of padding bytes and field bytes,
1695/// a struct is soundly `FromBytes` if:
1696/// 1. its padding is soundly `FromBytes`, and
1697/// 2. its fields are soundly `FromBytes`.
1698///
/// The first condition is always satisfied: padding bytes do not have any
/// validity constraints. A [discussion] of this point in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
1703///
1704/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1705///
1706/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
1707/// its fields are `FromBytes`.
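///
/// For example, the following derive compiles even though `NoRepr` has no
/// `repr` attribute, because both of its fields are `FromBytes` (a minimal
/// sketch; the type and field names are illustrative):
///
/// ```
/// # use zerocopy_derive::{FromBytes, FromZeroes};
/// #[derive(FromZeroes, FromBytes)]
/// struct NoRepr {
///     a: u8,
///     b: u32,
/// }
/// ```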
1708// TODO(#146): Document why we don't require an enum to have an explicit `repr`
1709// attribute.
1710#[cfg(any(feature = "derive", test))]
1711#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1712pub use zerocopy_derive::FromBytes;
1713
1714/// Types for which any bit pattern is valid.
1715///
1716/// Any memory region of the appropriate length which contains initialized bytes
1717/// can be viewed as any `FromBytes` type with no runtime overhead. This is
1718/// useful for efficiently parsing bytes as structured data.
1719///
1720/// # Implementation
1721///
1722/// **Do not implement this trait yourself!** Instead, use
1723/// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature);
1724/// e.g.:
1725///
1726/// ```
1727/// # use zerocopy_derive::{FromBytes, FromZeroes};
1728/// #[derive(FromZeroes, FromBytes)]
1729/// struct MyStruct {
1730/// # /*
1731/// ...
1732/// # */
1733/// }
1734///
1735/// #[derive(FromZeroes, FromBytes)]
1736/// #[repr(u8)]
1737/// enum MyEnum {
1738/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1739/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1740/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1741/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1742/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1743/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1744/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1745/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1746/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1747/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1748/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1749/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1750/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1751/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1752/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1753/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1754/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1755/// # VFF,
1756/// # /*
1757/// ...
1758/// # */
1759/// }
1760///
1761/// #[derive(FromZeroes, FromBytes)]
1762/// union MyUnion {
1763/// # variant: u8,
1764/// # /*
1765/// ...
1766/// # */
1767/// }
1768/// ```
1769///
1770/// This derive performs a sophisticated, compile-time safety analysis to
1771/// determine whether a type is `FromBytes`.
1772///
1773/// # Safety
1774///
1775/// *This section describes what is required in order for `T: FromBytes`, and
1776/// what unsafe code may assume of such types. If you don't plan on implementing
1777/// `FromBytes` manually, and you don't plan on writing unsafe code that
1778/// operates on `FromBytes` types, then you don't need to read this section.*
1779///
1780/// If `T: FromBytes`, then unsafe code may assume that:
1781/// - It is sound to treat any initialized sequence of bytes of length
1782/// `size_of::<T>()` as a `T`.
/// - Given `b: &[u8]` where `b.len() == size_of::<T>()` and `b` is aligned to
///   `align_of::<T>()`, it is sound to construct a `t: &T` at the same address
///   as `b`, and it is sound for both `b` and `t` to be live at the same time.
1786///
1787/// If a type is marked as `FromBytes` which violates this contract, it may
1788/// cause undefined behavior.
1789///
1790/// `#[derive(FromBytes)]` only permits [types which satisfy these
1791/// requirements][derive-analysis].
1792///
1793#[cfg_attr(
1794 feature = "derive",
1795 doc = "[derive]: zerocopy_derive::FromBytes",
1796 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
1797)]
1798#[cfg_attr(
1799 not(feature = "derive"),
1800 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
1801 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
1802)]
1803pub unsafe trait FromBytes: FromZeroes {
1804 // The `Self: Sized` bound makes it so that `FromBytes` is still object
1805 // safe.
1806 #[doc(hidden)]
1807 fn only_derive_is_allowed_to_implement_this_trait()
1808 where
1809 Self: Sized;
1810
1811 /// Interprets the given `bytes` as a `&Self` without copying.
1812 ///
1813 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1814 /// `align_of::<Self>()`, this returns `None`.
1815 ///
1816 /// # Examples
1817 ///
1818 /// ```
1819 /// use zerocopy::FromBytes;
1820 /// # use zerocopy_derive::*;
1821 ///
1822 /// #[derive(FromZeroes, FromBytes)]
1823 /// #[repr(C)]
1824 /// struct PacketHeader {
1825 /// src_port: [u8; 2],
1826 /// dst_port: [u8; 2],
1827 /// length: [u8; 2],
1828 /// checksum: [u8; 2],
1829 /// }
1830 ///
1831 /// // These bytes encode a `PacketHeader`.
1832 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
1833 ///
1834 /// let header = PacketHeader::ref_from(bytes).unwrap();
1835 ///
1836 /// assert_eq!(header.src_port, [0, 1]);
1837 /// assert_eq!(header.dst_port, [2, 3]);
1838 /// assert_eq!(header.length, [4, 5]);
1839 /// assert_eq!(header.checksum, [6, 7]);
1840 /// ```
1841 #[inline]
1842 fn ref_from(bytes: &[u8]) -> Option<&Self>
1843 where
1844 Self: Sized,
1845 {
1846 Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref)
1847 }
1848
1849 /// Interprets the prefix of the given `bytes` as a `&Self` without copying.
1850 ///
1851 /// `ref_from_prefix` returns a reference to the first `size_of::<Self>()`
1852 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1853 /// aligned to `align_of::<Self>()`, this returns `None`.
1854 ///
1855 /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
1856 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1857 ///
1858 /// # Examples
1859 ///
1860 /// ```
1861 /// use zerocopy::FromBytes;
1862 /// # use zerocopy_derive::*;
1863 ///
1864 /// #[derive(FromZeroes, FromBytes)]
1865 /// #[repr(C)]
1866 /// struct PacketHeader {
1867 /// src_port: [u8; 2],
1868 /// dst_port: [u8; 2],
1869 /// length: [u8; 2],
1870 /// checksum: [u8; 2],
1871 /// }
1872 ///
1873 /// // These are more bytes than are needed to encode a `PacketHeader`.
1874 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1875 ///
1876 /// let header = PacketHeader::ref_from_prefix(bytes).unwrap();
1877 ///
1878 /// assert_eq!(header.src_port, [0, 1]);
1879 /// assert_eq!(header.dst_port, [2, 3]);
1880 /// assert_eq!(header.length, [4, 5]);
1881 /// assert_eq!(header.checksum, [6, 7]);
1882 /// ```
1883 #[inline]
1884 fn ref_from_prefix(bytes: &[u8]) -> Option<&Self>
1885 where
1886 Self: Sized,
1887 {
1888 Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref())
1889 }
1890
1891 /// Interprets the suffix of the given `bytes` as a `&Self` without copying.
1892 ///
1893 /// `ref_from_suffix` returns a reference to the last `size_of::<Self>()`
1894 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
1895 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
1896 ///
1897 /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, use
1898 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1899 ///
1900 /// # Examples
1901 ///
1902 /// ```
1903 /// use zerocopy::FromBytes;
1904 /// # use zerocopy_derive::*;
1905 ///
1906 /// #[derive(FromZeroes, FromBytes)]
1907 /// #[repr(C)]
1908 /// struct PacketTrailer {
1909 /// frame_check_sequence: [u8; 4],
1910 /// }
1911 ///
1912 /// // These are more bytes than are needed to encode a `PacketTrailer`.
1913 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1914 ///
1915 /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap();
1916 ///
1917 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
1918 /// ```
1919 #[inline]
1920 fn ref_from_suffix(bytes: &[u8]) -> Option<&Self>
1921 where
1922 Self: Sized,
1923 {
1924 Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref())
1925 }
1926
1927 /// Interprets the given `bytes` as a `&mut Self` without copying.
1928 ///
1929 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1930 /// `align_of::<Self>()`, this returns `None`.
1931 ///
1932 /// # Examples
1933 ///
1934 /// ```
1935 /// use zerocopy::FromBytes;
1936 /// # use zerocopy_derive::*;
1937 ///
1938 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1939 /// #[repr(C)]
1940 /// struct PacketHeader {
1941 /// src_port: [u8; 2],
1942 /// dst_port: [u8; 2],
1943 /// length: [u8; 2],
1944 /// checksum: [u8; 2],
1945 /// }
1946 ///
1947 /// // These bytes encode a `PacketHeader`.
1948 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
1949 ///
1950 /// let header = PacketHeader::mut_from(bytes).unwrap();
1951 ///
1952 /// assert_eq!(header.src_port, [0, 1]);
1953 /// assert_eq!(header.dst_port, [2, 3]);
1954 /// assert_eq!(header.length, [4, 5]);
1955 /// assert_eq!(header.checksum, [6, 7]);
1956 ///
1957 /// header.checksum = [0, 0];
1958 ///
1959 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
1960 /// ```
1961 #[inline]
1962 fn mut_from(bytes: &mut [u8]) -> Option<&mut Self>
1963 where
1964 Self: Sized + AsBytes,
1965 {
1966 Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut)
1967 }
1968
1969 /// Interprets the prefix of the given `bytes` as a `&mut Self` without
1970 /// copying.
1971 ///
1972 /// `mut_from_prefix` returns a reference to the first `size_of::<Self>()`
1973 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1974 /// aligned to `align_of::<Self>()`, this returns `None`.
1975 ///
1976 /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use
1977 /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
1978 ///
1979 /// # Examples
1980 ///
1981 /// ```
1982 /// use zerocopy::FromBytes;
1983 /// # use zerocopy_derive::*;
1984 ///
1985 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1986 /// #[repr(C)]
1987 /// struct PacketHeader {
1988 /// src_port: [u8; 2],
1989 /// dst_port: [u8; 2],
1990 /// length: [u8; 2],
1991 /// checksum: [u8; 2],
1992 /// }
1993 ///
1994 /// // These are more bytes than are needed to encode a `PacketHeader`.
1995 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
1996 ///
1997 /// let header = PacketHeader::mut_from_prefix(bytes).unwrap();
1998 ///
1999 /// assert_eq!(header.src_port, [0, 1]);
2000 /// assert_eq!(header.dst_port, [2, 3]);
2001 /// assert_eq!(header.length, [4, 5]);
2002 /// assert_eq!(header.checksum, [6, 7]);
2003 ///
2004 /// header.checksum = [0, 0];
2005 ///
2006 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]);
2007 /// ```
2008 #[inline]
2009 fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self>
2010 where
2011 Self: Sized + AsBytes,
2012 {
2013 Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut())
2014 }
2015
2016 /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying.
2017 ///
2018 /// `mut_from_suffix` returns a reference to the last `size_of::<Self>()`
2019 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
2020 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
2021 ///
2022 /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then,
2023 /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
2024 ///
2025 /// # Examples
2026 ///
2027 /// ```
2028 /// use zerocopy::FromBytes;
2029 /// # use zerocopy_derive::*;
2030 ///
2031 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2032 /// #[repr(C)]
2033 /// struct PacketTrailer {
2034 /// frame_check_sequence: [u8; 4],
2035 /// }
2036 ///
2037 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2038 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2039 ///
2040 /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap();
2041 ///
2042 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2043 ///
2044 /// trailer.frame_check_sequence = [0, 0, 0, 0];
2045 ///
2046 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2047 /// ```
2048 #[inline]
2049 fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self>
2050 where
2051 Self: Sized + AsBytes,
2052 {
2053 Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut())
2054 }
2055
2056 /// Interprets the given `bytes` as a `&[Self]` without copying.
2057 ///
2058 /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned to
2059 /// `align_of::<Self>()`, this returns `None`.
2060 ///
2061 /// If you need to convert a specific number of slice elements, see
2062 /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or
2063 /// [`slice_from_suffix`](FromBytes::slice_from_suffix).
2064 ///
2065 /// # Panics
2066 ///
2067 /// If `Self` is a zero-sized type.
2068 ///
2069 /// # Examples
2070 ///
2071 /// ```
2072 /// use zerocopy::FromBytes;
2073 /// # use zerocopy_derive::*;
2074 ///
2075 /// # #[derive(Debug, PartialEq, Eq)]
2076 /// #[derive(FromZeroes, FromBytes)]
2077 /// #[repr(C)]
2078 /// struct Pixel {
2079 /// r: u8,
2080 /// g: u8,
2081 /// b: u8,
2082 /// a: u8,
2083 /// }
2084 ///
2085 /// // These bytes encode two `Pixel`s.
2086 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2087 ///
2088 /// let pixels = Pixel::slice_from(bytes).unwrap();
2089 ///
2090 /// assert_eq!(pixels, &[
2091 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2092 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2093 /// ]);
2094 /// ```
2095 #[inline]
2096 fn slice_from(bytes: &[u8]) -> Option<&[Self]>
2097 where
2098 Self: Sized,
2099 {
2100 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice())
2101 }
2102
2103 /// Interprets the prefix of the given `bytes` as a `&[Self]` with length
2104 /// equal to `count` without copying.
2105 ///
2106 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2107 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2108 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2109 /// `&[Self]`, and returns the remaining bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2111 /// If any of the length, alignment, or overflow checks fail, it returns
2112 /// `None`.
2113 ///
2114 /// # Panics
2115 ///
2116 /// If `T` is a zero-sized type.
2117 ///
2118 /// # Examples
2119 ///
2120 /// ```
2121 /// use zerocopy::FromBytes;
2122 /// # use zerocopy_derive::*;
2123 ///
2124 /// # #[derive(Debug, PartialEq, Eq)]
2125 /// #[derive(FromZeroes, FromBytes)]
2126 /// #[repr(C)]
2127 /// struct Pixel {
2128 /// r: u8,
2129 /// g: u8,
2130 /// b: u8,
2131 /// a: u8,
2132 /// }
2133 ///
2134 /// // These are more bytes than are needed to encode two `Pixel`s.
2135 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2136 ///
2137 /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap();
2138 ///
2139 /// assert_eq!(pixels, &[
2140 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2141 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2142 /// ]);
2143 ///
2144 /// assert_eq!(rest, &[8, 9]);
2145 /// ```
2146 #[inline]
2147 fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])>
2148 where
2149 Self: Sized,
2150 {
2151 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b))
2152 }
2153
2154 /// Interprets the suffix of the given `bytes` as a `&[Self]` with length
2155 /// equal to `count` without copying.
2156 ///
2157 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2158 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2159 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2160 /// `&[Self]`, and returns the preceding bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2162 /// If any of the length, alignment, or overflow checks fail, it returns
2163 /// `None`.
2164 ///
2165 /// # Panics
2166 ///
2167 /// If `T` is a zero-sized type.
2168 ///
2169 /// # Examples
2170 ///
2171 /// ```
2172 /// use zerocopy::FromBytes;
2173 /// # use zerocopy_derive::*;
2174 ///
2175 /// # #[derive(Debug, PartialEq, Eq)]
2176 /// #[derive(FromZeroes, FromBytes)]
2177 /// #[repr(C)]
2178 /// struct Pixel {
2179 /// r: u8,
2180 /// g: u8,
2181 /// b: u8,
2182 /// a: u8,
2183 /// }
2184 ///
2185 /// // These are more bytes than are needed to encode two `Pixel`s.
2186 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2187 ///
2188 /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap();
2189 ///
2190 /// assert_eq!(rest, &[0, 1]);
2191 ///
2192 /// assert_eq!(pixels, &[
2193 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2194 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2195 /// ]);
2196 /// ```
2197 #[inline]
2198 fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])>
2199 where
2200 Self: Sized,
2201 {
2202 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice()))
2203 }
2204
2205 /// Interprets the given `bytes` as a `&mut [Self]` without copying.
2206 ///
2207 /// If `bytes.len() % size_of::<T>() != 0` or `bytes` is not aligned to
2208 /// `align_of::<T>()`, this returns `None`.
2209 ///
2210 /// If you need to convert a specific number of slice elements, see
2211 /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or
2212 /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix).
2213 ///
2214 /// # Panics
2215 ///
2216 /// If `T` is a zero-sized type.
2217 ///
2218 /// # Examples
2219 ///
2220 /// ```
2221 /// use zerocopy::FromBytes;
2222 /// # use zerocopy_derive::*;
2223 ///
2224 /// # #[derive(Debug, PartialEq, Eq)]
2225 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2226 /// #[repr(C)]
2227 /// struct Pixel {
2228 /// r: u8,
2229 /// g: u8,
2230 /// b: u8,
2231 /// a: u8,
2232 /// }
2233 ///
2234 /// // These bytes encode two `Pixel`s.
2235 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
2236 ///
2237 /// let pixels = Pixel::mut_slice_from(bytes).unwrap();
2238 ///
2239 /// assert_eq!(pixels, &[
2240 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2241 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2242 /// ]);
2243 ///
2244 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2245 ///
2246 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
2247 /// ```
2248 #[inline]
2249 fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]>
2250 where
2251 Self: Sized + AsBytes,
2252 {
2253 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice())
2254 }
2255
2256 /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length
2257 /// equal to `count` without copying.
2258 ///
2259 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2260 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2261 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2262 /// `&[Self]`, and returns the remaining bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2264 /// If any of the length, alignment, or overflow checks fail, it returns
2265 /// `None`.
2266 ///
2267 /// # Panics
2268 ///
2269 /// If `T` is a zero-sized type.
2270 ///
2271 /// # Examples
2272 ///
2273 /// ```
2274 /// use zerocopy::FromBytes;
2275 /// # use zerocopy_derive::*;
2276 ///
2277 /// # #[derive(Debug, PartialEq, Eq)]
2278 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2279 /// #[repr(C)]
2280 /// struct Pixel {
2281 /// r: u8,
2282 /// g: u8,
2283 /// b: u8,
2284 /// a: u8,
2285 /// }
2286 ///
2287 /// // These are more bytes than are needed to encode two `Pixel`s.
2288 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2289 ///
2290 /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap();
2291 ///
2292 /// assert_eq!(pixels, &[
2293 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2294 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2295 /// ]);
2296 ///
2297 /// assert_eq!(rest, &[8, 9]);
2298 ///
2299 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2300 ///
2301 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]);
2302 /// ```
2303 #[inline]
2304 fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
2305 where
2306 Self: Sized + AsBytes,
2307 {
2308 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b))
2309 }
2310
2311 /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length
2312 /// equal to `count` without copying.
2313 ///
2314 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2315 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2316 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2317 /// `&[Self]`, and returns the preceding bytes to the caller. It also
    /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2319 /// If any of the length, alignment, or overflow checks fail, it returns
2320 /// `None`.
2321 ///
2322 /// # Panics
2323 ///
2324 /// If `T` is a zero-sized type.
2325 ///
2326 /// # Examples
2327 ///
2328 /// ```
2329 /// use zerocopy::FromBytes;
2330 /// # use zerocopy_derive::*;
2331 ///
2332 /// # #[derive(Debug, PartialEq, Eq)]
2333 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2334 /// #[repr(C)]
2335 /// struct Pixel {
2336 /// r: u8,
2337 /// g: u8,
2338 /// b: u8,
2339 /// a: u8,
2340 /// }
2341 ///
2342 /// // These are more bytes than are needed to encode two `Pixel`s.
2343 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2344 ///
2345 /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap();
2346 ///
2347 /// assert_eq!(rest, &[0, 1]);
2348 ///
2349 /// assert_eq!(pixels, &[
2350 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2351 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2352 /// ]);
2353 ///
2354 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2355 ///
2356 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2357 /// ```
2358 #[inline]
2359 fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
2360 where
2361 Self: Sized + AsBytes,
2362 {
2363 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice()))
2364 }
2365
2366 /// Reads a copy of `Self` from `bytes`.
2367 ///
2368 /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
2369 ///
2370 /// # Examples
2371 ///
2372 /// ```
2373 /// use zerocopy::FromBytes;
2374 /// # use zerocopy_derive::*;
2375 ///
2376 /// #[derive(FromZeroes, FromBytes)]
2377 /// #[repr(C)]
2378 /// struct PacketHeader {
2379 /// src_port: [u8; 2],
2380 /// dst_port: [u8; 2],
2381 /// length: [u8; 2],
2382 /// checksum: [u8; 2],
2383 /// }
2384 ///
2385 /// // These bytes encode a `PacketHeader`.
2386 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2387 ///
2388 /// let header = PacketHeader::read_from(bytes).unwrap();
2389 ///
2390 /// assert_eq!(header.src_port, [0, 1]);
2391 /// assert_eq!(header.dst_port, [2, 3]);
2392 /// assert_eq!(header.length, [4, 5]);
2393 /// assert_eq!(header.checksum, [6, 7]);
2394 /// ```
2395 #[inline]
2396 fn read_from(bytes: &[u8]) -> Option<Self>
2397 where
2398 Self: Sized,
2399 {
2400 Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner())
2401 }
2402
2403 /// Reads a copy of `Self` from the prefix of `bytes`.
2404 ///
2405 /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
2406 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2407 /// `None`.
2408 ///
2409 /// # Examples
2410 ///
2411 /// ```
2412 /// use zerocopy::FromBytes;
2413 /// # use zerocopy_derive::*;
2414 ///
2415 /// #[derive(FromZeroes, FromBytes)]
2416 /// #[repr(C)]
2417 /// struct PacketHeader {
2418 /// src_port: [u8; 2],
2419 /// dst_port: [u8; 2],
2420 /// length: [u8; 2],
2421 /// checksum: [u8; 2],
2422 /// }
2423 ///
2424 /// // These are more bytes than are needed to encode a `PacketHeader`.
2425 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2426 ///
2427 /// let header = PacketHeader::read_from_prefix(bytes).unwrap();
2428 ///
2429 /// assert_eq!(header.src_port, [0, 1]);
2430 /// assert_eq!(header.dst_port, [2, 3]);
2431 /// assert_eq!(header.length, [4, 5]);
2432 /// assert_eq!(header.checksum, [6, 7]);
2433 /// ```
2434 #[inline]
2435 fn read_from_prefix(bytes: &[u8]) -> Option<Self>
2436 where
2437 Self: Sized,
2438 {
2439 Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)
2440 .map(|(r, _)| r.read().into_inner())
2441 }
2442
2443 /// Reads a copy of `Self` from the suffix of `bytes`.
2444 ///
2445 /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
2446 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2447 /// `None`.
2448 ///
2449 /// # Examples
2450 ///
2451 /// ```
2452 /// use zerocopy::FromBytes;
2453 /// # use zerocopy_derive::*;
2454 ///
2455 /// #[derive(FromZeroes, FromBytes)]
2456 /// #[repr(C)]
2457 /// struct PacketTrailer {
2458 /// frame_check_sequence: [u8; 4],
2459 /// }
2460 ///
2461 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2462 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2463 ///
2464 /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap();
2465 ///
2466 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2467 /// ```
2468 #[inline]
2469 fn read_from_suffix(bytes: &[u8]) -> Option<Self>
2470 where
2471 Self: Sized,
2472 {
2473 Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)
2474 .map(|(_, r)| r.read().into_inner())
2475 }
2476}
2477
2478/// Analyzes whether a type is [`AsBytes`].
2479///
2480/// This derive analyzes, at compile time, whether the annotated type satisfies
2481/// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is
2482/// sound to do so. This derive can be applied to structs, enums, and unions;
2483/// e.g.:
2484///
2485/// ```
2486/// # use zerocopy_derive::{AsBytes};
2487/// #[derive(AsBytes)]
2488/// #[repr(C)]
2489/// struct MyStruct {
2490/// # /*
2491/// ...
2492/// # */
2493/// }
2494///
2495/// #[derive(AsBytes)]
2496/// #[repr(u8)]
2497/// enum MyEnum {
2498/// # Variant,
2499/// # /*
2500/// ...
2501/// # */
2502/// }
2503///
2504/// #[derive(AsBytes)]
2505/// #[repr(C)]
2506/// union MyUnion {
2507/// # variant: u8,
2508/// # /*
2509/// ...
2510/// # */
2511/// }
2512/// ```
2513///
2514/// [safety conditions]: trait@AsBytes#safety
2515///
2516/// # Error Messages
2517///
2518/// Due to the way that the custom derive for `AsBytes` is implemented, you may
2519/// get an error like this:
2520///
2521/// ```text
2522/// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied
2523/// --> lib.rs:23:10
2524/// |
2525/// 1 | #[derive(AsBytes)]
2526/// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>`
2527/// |
2528/// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>`
2529/// ```
2530///
2531/// This error indicates that the type being annotated has padding bytes, which
2532/// is illegal for `AsBytes` types. Consider reducing the alignment of some
2533/// fields by using types in the [`byteorder`] module, adding explicit struct
2534/// fields where those padding bytes would be, or using `#[repr(packed)]`. See
2535/// the Rust Reference's page on [type layout] for more information
2536/// about type layout and padding.
2537///
2538/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
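///
/// For example, one way to remove the padding from a `repr(C)` struct is to
/// make it explicit with a trailing field (a minimal sketch; the `_padding`
/// field name is illustrative):
///
/// ```
/// # use zerocopy_derive::AsBytes;
/// #[derive(AsBytes)]
/// #[repr(C)]
/// struct Foo {
///     a: u16,
///     b: u8,
///     // Occupies the byte that would otherwise be padding.
///     _padding: u8,
/// }
/// ```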
2539///
2540/// # Analysis
2541///
2542/// *This section describes, roughly, the analysis performed by this derive to
2543/// determine whether it is sound to implement `AsBytes` for a given type.
2544/// Unless you are modifying the implementation of this derive, or attempting to
2545/// manually implement `AsBytes` for a type yourself, you don't need to read
2546/// this section.*
2547///
2548/// If a type has the following properties, then this derive can implement
2549/// `AsBytes` for that type:
2550///
2551/// - If the type is a struct:
2552/// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
2553/// or `repr(packed)`).
2554/// - All of its fields must be `AsBytes`.
2555/// - Its layout must have no padding. This is always true for
2556/// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
2557/// algorithm described in the [Rust Reference].
2558/// - If the type is an enum:
2559/// - It must be a C-like enum (meaning that all variants have no fields).
2560/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
2561/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
2562/// - The type must not contain any [`UnsafeCell`]s (this is required in order
2563/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
2564/// memory). The type may contain references or pointers to `UnsafeCell`s so
2565/// long as those values can themselves be initialized from zeroes (`AsBytes`
2566/// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it
2567/// could be one day).
2568///
2569/// [`UnsafeCell`]: core::cell::UnsafeCell
2570///
2571/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `AsBytes`, and must *not* rely on the
2573/// implementation details of this derive.
2574///
2575/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
2576#[cfg(any(feature = "derive", test))]
2577#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
2578pub use zerocopy_derive::AsBytes;
2579
2580/// Types that can be viewed as an immutable slice of initialized bytes.
2581///
2582/// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same
2583/// size. This is useful for efficiently serializing structured data as raw
2584/// bytes.
2585///
2586/// # Implementation
2587///
2588/// **Do not implement this trait yourself!** Instead, use
2589/// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.:
2590///
2591/// ```
2592/// # use zerocopy_derive::AsBytes;
2593/// #[derive(AsBytes)]
2594/// #[repr(C)]
2595/// struct MyStruct {
2596/// # /*
2597/// ...
2598/// # */
2599/// }
2600///
2601/// #[derive(AsBytes)]
2602/// #[repr(u8)]
2603/// enum MyEnum {
2604/// # Variant0,
2605/// # /*
2606/// ...
2607/// # */
2608/// }
2609///
2610/// #[derive(AsBytes)]
2611/// #[repr(C)]
2612/// union MyUnion {
2613/// # variant: u8,
2614/// # /*
2615/// ...
2616/// # */
2617/// }
2618/// ```
2619///
2620/// This derive performs a sophisticated, compile-time safety analysis to
2621/// determine whether a type is `AsBytes`. See the [derive
2622/// documentation][derive] for guidance on how to interpret error messages
2623/// produced by the derive's analysis.
2624///
2625/// # Safety
2626///
2627/// *This section describes what is required in order for `T: AsBytes`, and
2628/// what unsafe code may assume of such types. If you don't plan on implementing
2629/// `AsBytes` manually, and you don't plan on writing unsafe code that
2630/// operates on `AsBytes` types, then you don't need to read this section.*
2631///
2632/// If `T: AsBytes`, then unsafe code may assume that:
2633/// - It is sound to treat any `t: T` as an immutable `[u8]` of length
2634/// `size_of_val(t)`.
2635/// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() ==
2636/// size_of_val(t)` at the same address as `t`, and it is sound for both `b`
2637/// and `t` to be live at the same time.
2638///
2639/// If a type is marked as `AsBytes` which violates this contract, it may cause
2640/// undefined behavior.
2641///
2642/// `#[derive(AsBytes)]` only permits [types which satisfy these
2643/// requirements][derive-analysis].
2644///
2645#[cfg_attr(
2646 feature = "derive",
2647 doc = "[derive]: zerocopy_derive::AsBytes",
2648 doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis"
2649)]
2650#[cfg_attr(
2651 not(feature = "derive"),
2652 doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"),
2653 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"),
2654)]
2655pub unsafe trait AsBytes {
2656 // The `Self: Sized` bound makes it so that this function doesn't prevent
2657 // `AsBytes` from being object safe. Note that other `AsBytes` methods
2658 // prevent object safety, but those provide a benefit in exchange for object
2659 // safety. If at some point we remove those methods, change their type
2660 // signatures, or move them out of this trait so that `AsBytes` is object
2661 // safe again, it's important that this function not prevent object safety.
2662 #[doc(hidden)]
2663 fn only_derive_is_allowed_to_implement_this_trait()
2664 where
2665 Self: Sized;
2666
2667 /// Gets the bytes of this value.
2668 ///
2669 /// `as_bytes` provides access to the bytes of this value as an immutable
2670 /// byte slice.
2671 ///
2672 /// # Examples
2673 ///
2674 /// ```
2675 /// use zerocopy::AsBytes;
2676 /// # use zerocopy_derive::*;
2677 ///
2678 /// #[derive(AsBytes)]
2679 /// #[repr(C)]
2680 /// struct PacketHeader {
2681 /// src_port: [u8; 2],
2682 /// dst_port: [u8; 2],
2683 /// length: [u8; 2],
2684 /// checksum: [u8; 2],
2685 /// }
2686 ///
2687 /// let header = PacketHeader {
2688 /// src_port: [0, 1],
2689 /// dst_port: [2, 3],
2690 /// length: [4, 5],
2691 /// checksum: [6, 7],
2692 /// };
2693 ///
2694 /// let bytes = header.as_bytes();
2695 ///
2696 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2697 /// ```
2698 #[inline(always)]
2699 fn as_bytes(&self) -> &[u8] {
2700 // Note that this method does not have a `Self: Sized` bound;
2701 // `size_of_val` works for unsized values too.
2702 let len = mem::size_of_val(self);
2703 let slf: *const Self = self;
2704
2705 // SAFETY:
2706 // - `slf.cast::<u8>()` is valid for reads for `len *
2707 // mem::size_of::<u8>()` many bytes because...
2708 // - `slf` is the same pointer as `self`, and `self` is a reference
2709 // which points to an object whose size is `len`. Thus...
2710 // - The entire region of `len` bytes starting at `slf` is contained
2711 // within a single allocation.
2712 // - `slf` is non-null.
2713 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2714 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2715 // initialized.
2716 // - Since `slf` is derived from `self`, and `self` is an immutable
2717 // reference, the only other references to this memory region that
2718 // could exist are other immutable references, and those don't allow
2719 // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s,
2720 // which are the only types for which this rule wouldn't be sufficient.
2721 // - The total size of the resulting slice is no larger than
2722 // `isize::MAX` because no allocation produced by safe code can be
2723 // larger than `isize::MAX`.
2724 //
2725 // TODO(#429): Add references to docs and quotes.
2726 unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
2727 }
2728
2729 /// Gets the bytes of this value mutably.
2730 ///
2731 /// `as_bytes_mut` provides access to the bytes of this value as a mutable
2732 /// byte slice.
2733 ///
2734 /// # Examples
2735 ///
2736 /// ```
2737 /// use zerocopy::AsBytes;
2738 /// # use zerocopy_derive::*;
2739 ///
2740 /// # #[derive(Eq, PartialEq, Debug)]
2741 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2742 /// #[repr(C)]
2743 /// struct PacketHeader {
2744 /// src_port: [u8; 2],
2745 /// dst_port: [u8; 2],
2746 /// length: [u8; 2],
2747 /// checksum: [u8; 2],
2748 /// }
2749 ///
2750 /// let mut header = PacketHeader {
2751 /// src_port: [0, 1],
2752 /// dst_port: [2, 3],
2753 /// length: [4, 5],
2754 /// checksum: [6, 7],
2755 /// };
2756 ///
2757 /// let bytes = header.as_bytes_mut();
2758 ///
2759 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2760 ///
2761 /// bytes.reverse();
2762 ///
2763 /// assert_eq!(header, PacketHeader {
2764 /// src_port: [7, 6],
2765 /// dst_port: [5, 4],
2766 /// length: [3, 2],
2767 /// checksum: [1, 0],
2768 /// });
2769 /// ```
2770 #[inline(always)]
2771 fn as_bytes_mut(&mut self) -> &mut [u8]
2772 where
2773 Self: FromBytes,
2774 {
2775 // Note that this method does not have a `Self: Sized` bound;
2776 // `size_of_val` works for unsized values too.
2777 let len = mem::size_of_val(self);
2778 let slf: *mut Self = self;
2779
2780 // SAFETY:
2781 // - `slf.cast::<u8>()` is valid for reads and writes for `len *
2782 // mem::size_of::<u8>()` many bytes because...
2783 // - `slf` is the same pointer as `self`, and `self` is a reference
2784 // which points to an object whose size is `len`. Thus...
2785 // - The entire region of `len` bytes starting at `slf` is contained
2786 // within a single allocation.
2787 // - `slf` is non-null.
2788 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2789 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2790 // initialized.
2791 // - `Self: FromBytes` ensures that no write to this memory region
2792 // could result in it containing an invalid `Self`.
2793 // - Since `slf` is derived from `self`, and `self` is a mutable
2794 // reference, no other references to this memory region can exist.
2795 // - The total size of the resulting slice is no larger than
2796 // `isize::MAX` because no allocation produced by safe code can be
2797 // larger than `isize::MAX`.
2798 //
2799 // TODO(#429): Add references to docs and quotes.
2800 unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
2801 }
2802
2803 /// Writes a copy of `self` to `bytes`.
2804 ///
2805 /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
2806 ///
2807 /// # Examples
2808 ///
2809 /// ```
2810 /// use zerocopy::AsBytes;
2811 /// # use zerocopy_derive::*;
2812 ///
2813 /// #[derive(AsBytes)]
2814 /// #[repr(C)]
2815 /// struct PacketHeader {
2816 /// src_port: [u8; 2],
2817 /// dst_port: [u8; 2],
2818 /// length: [u8; 2],
2819 /// checksum: [u8; 2],
2820 /// }
2821 ///
2822 /// let header = PacketHeader {
2823 /// src_port: [0, 1],
2824 /// dst_port: [2, 3],
2825 /// length: [4, 5],
2826 /// checksum: [6, 7],
2827 /// };
2828 ///
2829 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
2830 ///
2831 /// header.write_to(&mut bytes[..]);
2832 ///
2833 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2834 /// ```
2835 ///
2836 /// If too many or too few target bytes are provided, `write_to` returns
2837 /// `None` and leaves the target bytes unmodified:
2838 ///
2839 /// ```
2840 /// # use zerocopy::AsBytes;
2841 /// # let header = u128::MAX;
2842 /// let mut excessive_bytes = &mut [0u8; 128][..];
2843 ///
2844 /// let write_result = header.write_to(excessive_bytes);
2845 ///
2846 /// assert!(write_result.is_none());
2847 /// assert_eq!(excessive_bytes, [0u8; 128]);
2848 /// ```
2849 #[inline]
2850 fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
2851 if bytes.len() != mem::size_of_val(self) {
2852 return None;
2853 }
2854
2855 bytes.copy_from_slice(self.as_bytes());
2856 Some(())
2857 }
2858
2859 /// Writes a copy of `self` to the prefix of `bytes`.
2860 ///
2861 /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
2862 /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
2863 ///
2864 /// # Examples
2865 ///
2866 /// ```
2867 /// use zerocopy::AsBytes;
2868 /// # use zerocopy_derive::*;
2869 ///
2870 /// #[derive(AsBytes)]
2871 /// #[repr(C)]
2872 /// struct PacketHeader {
2873 /// src_port: [u8; 2],
2874 /// dst_port: [u8; 2],
2875 /// length: [u8; 2],
2876 /// checksum: [u8; 2],
2877 /// }
2878 ///
2879 /// let header = PacketHeader {
2880 /// src_port: [0, 1],
2881 /// dst_port: [2, 3],
2882 /// length: [4, 5],
2883 /// checksum: [6, 7],
2884 /// };
2885 ///
2886 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
2887 ///
2888 /// header.write_to_prefix(&mut bytes[..]);
2889 ///
2890 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
2891 /// ```
2892 ///
2893 /// If insufficient target bytes are provided, `write_to_prefix` returns
2894 /// `None` and leaves the target bytes unmodified:
2895 ///
2896 /// ```
2897 /// # use zerocopy::AsBytes;
2898 /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
2905 /// ```
2906 #[inline]
2907 fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
2908 let size = mem::size_of_val(self);
2909 bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
2910 Some(())
2911 }
2912
2913 /// Writes a copy of `self` to the suffix of `bytes`.
2914 ///
2915 /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
2916 /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
2917 ///
2918 /// # Examples
2919 ///
2920 /// ```
2921 /// use zerocopy::AsBytes;
2922 /// # use zerocopy_derive::*;
2923 ///
2924 /// #[derive(AsBytes)]
2925 /// #[repr(C)]
2926 /// struct PacketHeader {
2927 /// src_port: [u8; 2],
2928 /// dst_port: [u8; 2],
2929 /// length: [u8; 2],
2930 /// checksum: [u8; 2],
2931 /// }
2932 ///
2933 /// let header = PacketHeader {
2934 /// src_port: [0, 1],
2935 /// dst_port: [2, 3],
2936 /// length: [4, 5],
2937 /// checksum: [6, 7],
2938 /// };
2939 ///
2940 /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
2941 ///
2942 /// header.write_to_suffix(&mut bytes[..]);
2943 ///
2944 /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
2952 /// ```
2953 ///
2954 /// If insufficient target bytes are provided, `write_to_suffix` returns
2955 /// `None` and leaves the target bytes unmodified:
2956 ///
2957 /// ```
2958 /// # use zerocopy::AsBytes;
2959 /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
2966 /// ```
2967 #[inline]
2968 fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
2969 let start = bytes.len().checked_sub(mem::size_of_val(self))?;
2970 bytes
2971 .get_mut(start..)
2972 .expect("`start` should be in-bounds of `bytes`")
2973 .copy_from_slice(self.as_bytes());
2974 Some(())
2975 }
2976}
2977
2978/// Types with no alignment requirement.
2979///
2980/// WARNING: Do not implement this trait yourself! Instead, use
2981/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature).
2982///
2983/// If `T: Unaligned`, then `align_of::<T>() == 1`.
2984///
2985/// # Safety
2986///
2987/// *This section describes what is required in order for `T: Unaligned`, and
2988/// what unsafe code may assume of such types. `#[derive(Unaligned)]` only
2989/// permits types which satisfy these requirements. If you don't plan on
2990/// implementing `Unaligned` manually, and you don't plan on writing unsafe code
2991/// that operates on `Unaligned` types, then you don't need to read this
2992/// section.*
2993///
2994/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. Marking a
/// type as `Unaligned` when it violates this contract may cause undefined
/// behavior.
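///
/// # Examples
///
/// An illustrative sketch (the `Timestamp` type here is hypothetical): a
/// `#[repr(C)]` type whose fields all have alignment 1 can derive `Unaligned`,
/// and can therefore live at any byte offset within a buffer.
///
/// ```
/// use zerocopy::Unaligned;
/// # use zerocopy_derive::*;
///
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct Timestamp {
///     seconds: [u8; 4],
///     nanos: [u8; 4],
/// }
///
/// // `Unaligned` guarantees an alignment requirement of 1.
/// assert_eq!(core::mem::align_of::<Timestamp>(), 1);
/// ```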
2998pub unsafe trait Unaligned {
2999 // The `Self: Sized` bound makes it so that `Unaligned` is still object
3000 // safe.
3001 #[doc(hidden)]
3002 fn only_derive_is_allowed_to_implement_this_trait()
3003 where
3004 Self: Sized;
3005}
3006
3007safety_comment! {
3008 /// SAFETY:
3009 /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a
3010 /// zero-sized type to have a size of 0 and an alignment of 1."
3011 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
3012 /// is only one possible sequence of 0 bytes, and `()` is inhabited.
3013 /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes.
3014 /// - `Unaligned`: `()` has alignment 1.
3015 ///
3016 /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout
3017 unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3018 assert_unaligned!(());
3019}
3020
3021safety_comment! {
3022 /// SAFETY:
3023 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit
3024 /// patterns are valid for numeric types [1]
3025 /// - `AsBytes`: numeric types have no padding bytes [1]
3026 /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size
3027 /// of `u8` and `i8` as 1 byte. We also know that:
3028 /// - Alignment is >= 1 [3]
3029 /// - Size is an integer multiple of alignment [4]
3030 /// - The only value >= 1 for which 1 is an integer multiple is 1
3031 /// Therefore, the only possible alignment for `u8` and `i8` is 1.
3032 ///
3033 /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity:
3034 ///
3035 /// For every numeric type, `T`, the bit validity of `T` is equivalent to
3036 /// the bit validity of `[u8; size_of::<T>()]`. An uninitialized byte is
3037 /// not a valid `u8`.
3038 ///
3039 /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text
3040 /// is available on the Stable docs, cite those instead.
3041 ///
3042 /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout
3043 ///
3044 /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3045 ///
3046 /// Alignment is measured in bytes, and must be at least 1.
3047 ///
3048 /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3049 ///
3050 /// The size of a value is always a multiple of its alignment.
3051 ///
3052 /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather
3053 /// than bits or bytes, update this comment, especially the reference to
3054 /// [1].
3055 unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3056 unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3057 assert_unaligned!(u8, i8);
3058 unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3059 unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3060 unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3061 unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3062 unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3063 unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3064 unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3065 unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3066 unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3067 unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3068 unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3069 unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3070}
3071
3072safety_comment! {
3073 /// SAFETY:
3074 /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern
3075 /// 0x00" [1].
3076 /// - `AsBytes`: Since "the boolean type has a size and alignment of 1 each"
3077 /// and "The value false has the bit pattern 0x00 and the value true has
3078 /// the bit pattern 0x01" [1]. Thus, the only byte of the bool is always
3079 /// initialized.
3080 /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type
3081 /// has a size and alignment of 1 each."
3082 ///
3083 /// [1] https://doc.rust-lang.org/reference/types/boolean.html
3084 unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
3085 assert_unaligned!(bool);
3086 /// SAFETY:
3087 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3088 /// closure:
    ///   - Given `t: *mut bool` and `r = t as *mut u8`, `r` refers to an object
3090 /// of the same size as that referred to by `t`. This is true because
3091 /// `bool` and `u8` have the same size (1 byte) [1].
3092 /// - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>`
3093 /// which satisfies the preconditions of
3094 /// `TryFromBytes::<bool>::is_bit_valid`, it must be guaranteed that the
3095 /// memory referenced by that `Ptr` always contains a valid `u8`. Since
3096 /// `bool`'s single byte is always initialized, `is_bit_valid`'s
3097 /// precondition requires that the same is true of its argument. Since
3098 /// `u8`'s only bit validity invariant is that its single byte must be
3099 /// initialized, this memory is guaranteed to contain a valid `u8`.
3100 /// - The alignment of `bool` is equal to the alignment of `u8`. [1] [2]
3101 /// - The impl must only return `true` for its argument if the original
3102 /// `Ptr<bool>` refers to a valid `bool`. We only return true if the
3103 /// `u8` value is 0 or 1, and both of these are valid values for `bool`.
3104 /// [3]
3105 ///
3106 /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout:
3107 ///
3108 /// The size of most primitives is given in this table.
3109 ///
3110 /// | Type | `size_of::<Type>() ` |
3111 /// |-----------|----------------------|
3112 /// | `bool` | 1 |
3113 /// | `u8`/`i8` | 1 |
3114 ///
3115 /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
3116 ///
3117 /// The size of a value is always a multiple of its alignment.
3118 ///
3119 /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html:
3120 ///
3121 /// The value false has the bit pattern 0x00 and the value true has the
3122 /// bit pattern 0x01.
3123 unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2);
3124}
3125safety_comment! {
3126 /// SAFETY:
3127 /// - `FromZeroes`: Per reference [1], "[a] value of type char is a Unicode
3128 /// scalar value (i.e. a code point that is not a surrogate), represented
3129 /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to
3130 /// 0x10FFFF range" which contains 0x0000.
3131 /// - `AsBytes`: `char` is per reference [1] "represented as a 32-bit
3132 /// unsigned word" (`u32`) which is `AsBytes`. Note that unlike `u32`, not
3133 /// all bit patterns are valid for `char`.
3134 ///
3135 /// [1] https://doc.rust-lang.org/reference/types/textual.html
3136 unsafe_impl!(char: FromZeroes, AsBytes);
3137 /// SAFETY:
3138 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3139 /// closure:
    ///   - Given `t: *mut char` and `r = t as *mut u32`, `r` refers to an object
3141 /// of the same size as that referred to by `t`. This is true because
3142 /// `char` and `u32` have the same size [1].
3143 /// - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>`
3144 /// which satisfies the preconditions of
3145 /// `TryFromBytes::<char>::is_bit_valid`, it must be guaranteed that the
3146 /// memory referenced by that `Ptr` always contains a valid `u32`. Since
3147 /// `char`'s bytes are always initialized [2], `is_bit_valid`'s
3148 /// precondition requires that the same is true of its argument. Since
3149 /// `u32`'s only bit validity invariant is that its bytes must be
3150 /// initialized, this memory is guaranteed to contain a valid `u32`.
3151 /// - The alignment of `char` is equal to the alignment of `u32`. [1]
3152 /// - The impl must only return `true` for its argument if the original
3153 /// `Ptr<char>` refers to a valid `char`. `char::from_u32` guarantees
3154 /// that it returns `None` if its input is not a valid `char`. [3]
3155 ///
3156 /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity:
3157 ///
3158 /// `char` is guaranteed to have the same size and alignment as `u32` on
3159 /// all platforms.
3160 ///
3161 /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
3162 ///
3163 /// Every byte of a `char` is guaranteed to be initialized.
3164 ///
3165 /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
3166 ///
3167 /// `from_u32()` will return `None` if the input is not a valid value for
3168 /// a `char`.
3169 unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some());
3170}
3171safety_comment! {
3172 /// SAFETY:
3173 /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str`
3174 /// has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`,
3175 /// and `Unaligned`.
3176 ///
3177 /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!`
3178 /// uses `align_of`, which only works for `Sized` types.
3179 ///
3180 /// TODO(#429): Add quotes from documentation.
3181 ///
3182 /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout
3183 unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
3184 /// SAFETY:
3185 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3186 /// closure:
    ///   - Given `t: *mut str` and `r = t as *mut [u8]`, `r` refers to an object
3188 /// of the same size as that referred to by `t`. This is true because
3189 /// `str` and `[u8]` have the same representation. [1]
3190 /// - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>`
3191 /// which satisfies the preconditions of
3192 /// `TryFromBytes::<str>::is_bit_valid`, it must be guaranteed that the
3193 /// memory referenced by that `Ptr` always contains a valid `[u8]`.
3194 /// Since `str`'s bytes are always initialized [1], `is_bit_valid`'s
3195 /// precondition requires that the same is true of its argument. Since
3196 /// `[u8]`'s only bit validity invariant is that its bytes must be
3197 /// initialized, this memory is guaranteed to contain a valid `[u8]`.
3198 /// - The alignment of `str` is equal to the alignment of `[u8]`. [1]
3199 /// - The impl must only return `true` for its argument if the original
3200 /// `Ptr<str>` refers to a valid `str`. `str::from_utf8` guarantees that
3201 /// it returns `Err` if its input is not a valid `str`. [2]
3202 ///
3203 /// [1] Per https://doc.rust-lang.org/reference/types/textual.html:
3204 ///
    ///   A value of type `str` is represented the same way as `[u8]`.
3206 ///
3207 /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors:
3208 ///
3209 /// Returns `Err` if the slice is not UTF-8.
3210 unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok());
3211}
3212
3213safety_comment! {
3214 // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`.
3215 //
3216 /// SAFETY:
3217 /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated
3218 /// primitive. Since it is the same size, this guarantees it has no
3219 /// padding - integers have no padding, and there's no room for padding
3220 /// if it can represent all of the same values except 0.
3221 /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
3222 /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
3223 /// This is worded in a way that makes it unclear whether it's meant as a
3224 /// guarantee, but given the purpose of those types, it's virtually
    ///   unthinkable that that would ever change. `Option` cannot be smaller
    ///   than its contained type, which implies that the `NonZeroX8` types have
    ///   size 1 or 0. A `NonZeroX8` can represent multiple states, so it cannot
    ///   be 0 bytes, which means that it must be 1 byte. The only valid
    ///   alignment for a 1-byte type is 1.
3230 ///
3231 /// TODO(#429): Add quotes from documentation.
3232 ///
3233 /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
3234 /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
3235 /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
3236 /// that layout is the same as primitive layout.
3237 unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
3238 unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
3239 assert_unaligned!(NonZeroU8, NonZeroI8);
3240 unsafe_impl!(NonZeroU16: AsBytes);
3241 unsafe_impl!(NonZeroI16: AsBytes);
3242 unsafe_impl!(NonZeroU32: AsBytes);
3243 unsafe_impl!(NonZeroI32: AsBytes);
3244 unsafe_impl!(NonZeroU64: AsBytes);
3245 unsafe_impl!(NonZeroI64: AsBytes);
3246 unsafe_impl!(NonZeroU128: AsBytes);
3247 unsafe_impl!(NonZeroI128: AsBytes);
3248 unsafe_impl!(NonZeroUsize: AsBytes);
3249 unsafe_impl!(NonZeroIsize: AsBytes);
3250 /// SAFETY:
3251 /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
3252 /// closure:
    ///   - Given `t: *mut NonZeroXxx` and `r = t as *mut xxx`, `r` refers to an
3254 /// object of the same size as that referred to by `t`. This is true
3255 /// because `NonZeroXxx` and `xxx` have the same size. [1]
3256 /// - Since the closure takes a `&xxx` argument, given a `Ptr<'a,
3257 /// NonZeroXxx>` which satisfies the preconditions of
3258 /// `TryFromBytes::<NonZeroXxx>::is_bit_valid`, it must be guaranteed
3259 /// that the memory referenced by that `Ptr` always contains a valid
3260 /// `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1],
3261 /// `is_bit_valid`'s precondition requires that the same is true of its
3262 /// argument. Since `xxx`'s only bit validity invariant is that its
3263 /// bytes must be initialized, this memory is guaranteed to contain a
3264 /// valid `xxx`.
3265 /// - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`.
3266 /// [1]
3267 /// - The impl must only return `true` for its argument if the original
3268 /// `Ptr<NonZeroXxx>` refers to a valid `NonZeroXxx`. The only `xxx`
3269 /// which is not also a valid `NonZeroXxx` is 0. [1]
3270 ///
3271 /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html:
3272 ///
3273 /// `NonZeroU16` is guaranteed to have the same layout and bit validity as
3274 /// `u16` with the exception that `0` is not a valid instance.
3275 unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0);
3276 unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0);
3277 unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0);
3278 unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0);
3279 unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0);
3280 unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0);
3281 unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0);
3282 unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0);
3283 unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0);
3284 unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0);
3285 unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0);
3286 unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0);
3287}
3288safety_comment! {
3289 /// SAFETY:
3290 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`,
    ///   `AsBytes`: The Rust compiler reuses the `0` value to represent `None`, so
3292 /// `size_of::<Option<NonZeroXxx>>() == size_of::<xxx>()`; see
3293 /// `NonZeroXxx` documentation.
3294 /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
3295 /// `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
3296 /// This is worded in a way that makes it unclear whether it's meant as a
3297 /// guarantee, but given the purpose of those types, it's virtually
3298 /// unthinkable that that would ever change. The only valid alignment for
3299 /// a 1-byte type is 1.
3300 ///
3301 /// TODO(#429): Add quotes from documentation.
3302 ///
3303 /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
3304 /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
3305 ///
3306 /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
3307 /// for layout guarantees.
3308 unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3309 unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
3310 assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
3311 unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3312 unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3313 unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3314 unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3315 unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3316 unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3317 unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3318 unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3319 unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3320 unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
3321}
3322
3323safety_comment! {
3324 /// SAFETY:
3325 /// The following types can be transmuted from `[0u8; size_of::<T>()]`. [1]
3326 /// None of them contain `UnsafeCell`s, and so they all soundly implement
3327 /// `FromZeroes`.
3328 ///
3329 /// [1] Per
3330 /// https://doc.rust-lang.org/nightly/core/option/index.html#representation:
3331 ///
3332 /// Rust guarantees to optimize the following types `T` such that
3333 /// [`Option<T>`] has the same size and alignment as `T`. In some of these
3334 /// cases, Rust further guarantees that `transmute::<_, Option<T>>([0u8;
3335 /// size_of::<T>()])` is sound and produces `Option::<T>::None`. These
3336 /// cases are identified by the second column:
3337 ///
3338 /// | `T` | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
3339 /// |-----------------------|-----------------------------------------------------------|
3340 /// | [`Box<U>`] | when `U: Sized` |
3341 /// | `&U` | when `U: Sized` |
3342 /// | `&mut U` | when `U: Sized` |
3343 /// | [`ptr::NonNull<U>`] | when `U: Sized` |
3344 /// | `fn`, `extern "C" fn` | always |
3345 ///
3346 /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite
3347 /// the Stable docs once they're available.
3348 #[cfg(feature = "alloc")]
3349 unsafe_impl!(
3350 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3351 T => FromZeroes for Option<Box<T>>
3352 );
3353 unsafe_impl!(T => FromZeroes for Option<&'_ T>);
3354 unsafe_impl!(T => FromZeroes for Option<&'_ mut T>);
3355 unsafe_impl!(T => FromZeroes for Option<NonNull<T>>);
3356 unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...));
3357 unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...));
3358}
3359
3360safety_comment! {
3361 /// SAFETY:
3362 /// Per reference [1]:
3363 /// "For all T, the following are guaranteed:
3364 /// size_of::<PhantomData<T>>() == 0
3365 /// align_of::<PhantomData<T>>() == 1".
3366 /// This gives:
3367 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
3368 /// is only one possible sequence of 0 bytes, and `PhantomData` is
3369 /// inhabited.
3370 /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding
3371 /// bytes.
3372 /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment
3373 /// 1.
3374 ///
3375 /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1
3376 unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
3377 unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
3378 unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
3379 unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
3380 unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
3381 assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
3382}
3383safety_comment! {
3384 /// SAFETY:
3385 /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout and
3386 /// bit validity as `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and
3387 /// has a single field, which is `pub`. Per the reference [2], this means
3388 /// that the `#[repr(transparent)]` attribute is "considered part of the
3389 /// public ABI".
3390 ///
3391 /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an
3392 /// `is_bit_valid` closure:
    ///   - Given `t: *mut Wrapping<T>` and `r = t as *mut T`, `r` refers to an
    ///     object of the same size as that referred to by `t`. This is true
    ///     because `Wrapping<T>` and `T` have the same layout.
3396 /// - The alignment of `Wrapping<T>` is equal to the alignment of `T`.
3397 /// - The impl must only return `true` for its argument if the original
3398 /// `Ptr<Wrapping<T>>` refers to a valid `Wrapping<T>`. Since
3399 /// `Wrapping<T>` has the same bit validity as `T`, and since our impl
3400 /// just calls `T::is_bit_valid`, our impl returns `true` exactly when
3401 /// its argument contains a valid `Wrapping<T>`.
3402 /// - `FromBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
3403 /// `T: FromBytes`, then all initialized byte sequences are valid
3404 /// instances of `Wrapping<T>`. Similarly, if `T: FromBytes`, then
3405 /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes
3406 /// for Wrapping<T> where T: FromBytes` is a sound impl.
3407 /// - `AsBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
3408 /// `T: AsBytes`, then all valid instances of `Wrapping<T>` have all of
3409 /// their bytes initialized. Similarly, if `T: AsBytes`, then
3410 /// `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes
3411 /// for Wrapping<T> where T: AsBytes` is a valid impl.
3412 /// - `Unaligned`: Since `Wrapping<T>` has the same layout as `T`,
3413 /// `Wrapping<T>` has alignment 1 exactly when `T` does.
3414 ///
    /// TODO(#429): Add quotes from documentation.
    ///
    /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1):
    /// Reference this documentation once it's available on stable.
3424 ///
3425 /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent
3426 unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| {
3427 // SAFETY:
3428 // - Since `T` and `Wrapping<T>` have the same layout and bit validity
3429 // and contain the same fields, `T` contains `UnsafeCell`s exactly
3430 // where `Wrapping<T>` does. Thus, all memory and `UnsafeCell`
3431 // preconditions of `T::is_bit_valid` hold exactly when the same
3432 // preconditions for `Wrapping<T>::is_bit_valid` hold.
3433 // - By the same token, since `candidate` is guaranteed to have its
3434 // bytes initialized where there are always initialized bytes in
3435 // `Wrapping<T>`, the same is true for `T`.
3436 unsafe { T::is_bit_valid(candidate) }
3437 });
3438 unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
3439 unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
3440 unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
3441 unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
3442 assert_unaligned!(Wrapping<()>, Wrapping<u8>);
3443}
3444safety_comment! {
3445 // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes`
3446 // since it may contain uninitialized bytes.
3447 //
3448 /// SAFETY:
3449 /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`:
3450 /// `MaybeUninit<T>` has no restrictions on its contents. Unfortunately,
3451 /// in addition to bit validity, `TryFromBytes`, `FromZeroes` and
3452 /// `FromBytes` also require that implementers contain no `UnsafeCell`s.
    ///   Thus, we require `T: Trait` in order to ensure that `T` - and thus
    ///   `MaybeUninit<T>` - contains no `UnsafeCell`s. Requiring that `T`
    ///   implement each of these traits is therefore sufficient.
3456 /// - `Unaligned`: "MaybeUninit<T> is guaranteed to have the same size,
3457 /// alignment, and ABI as T" [1]
3458 ///
3459 /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1
3460 ///
3461 /// TODO(https://github.com/google/zerocopy/issues/251): If we split
3462 /// `FromBytes` and `RefFromBytes`, or if we introduce a separate
3463 /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes`
3464 /// and `FromBytes`.
3465 unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>);
3466 unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
3467 unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
3468 unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
3469 assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
3470}
3471safety_comment! {
3472 /// SAFETY:
3473 /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and
3474 /// accessing the inner value is safe (meaning that it's unsound to leave
3475 /// the inner value uninitialized while exposing the `ManuallyDrop` to safe
3476 /// code).
3477 /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any
3478 /// valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence
3479 /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If
3480 /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid
3481 /// `ManuallyDrop<T>`.
3482 /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound
3483 /// to let safe code access a `ManuallyDrop` whose inner value is
3484 /// uninitialized, safe code can only ever access a `ManuallyDrop` whose
3485 /// contents are a valid `T`. Since `T: AsBytes`, this means that safe
3486 /// code can only ever access a `ManuallyDrop` with all initialized bytes.
3487 /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment)
3488 /// as `T`, and `T: Unaligned` guarantees that that alignment is 1.
3489 ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`.
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
3500 unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
3501 unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
3502 unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
3503 unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
3504 assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
3505}
3506safety_comment! {
3507 /// SAFETY:
3508 /// Per the reference [1]:
3509 ///
3510 /// An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
3511 /// alignment of `T`. Arrays are laid out so that the zero-based `nth`
3512 /// element of the array is offset from the start of the array by `n *
3513 /// size_of::<T>()` bytes.
3514 ///
3515 /// ...
3516 ///
3517 /// Slices have the same layout as the section of the array they slice.
3518 ///
3519 /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s
3520 /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T;
3521 /// N]` are `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` if `T`
3522 /// is (respectively). Furthermore, since an array/slice has "the same
3523 /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is.
3524 ///
3525 /// Note that we don't `assert_unaligned!` for slice types because
3526 /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types.
3527 ///
3528 /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
3529 unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
3530 unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
3531 unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
3532 unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
3533 assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
3534 unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| {
        // SAFETY: Assuming the preconditions of `is_bit_valid` are satisfied,
        // the postcondition will hold as well: if `is_bit_valid(candidate)`
3537 // returns true, `*candidate` contains a valid `Self`. Per the reference
3538 // [1]:
3539 //
3540 // An array of `[T; N]` has a size of `size_of::<T>() * N` and the
3541 // same alignment of `T`. Arrays are laid out so that the zero-based
3542 // `nth` element of the array is offset from the start of the array by
3543 // `n * size_of::<T>()` bytes.
3544 //
3545 // ...
3546 //
3547 // Slices have the same layout as the section of the array they slice.
3548 //
        // In other words, the layout of a `[T]` is a sequence of `T`s laid out
3550 // back-to-back with no bytes in between. If all elements in `candidate`
3551 // are `is_bit_valid`, so too is `candidate`.
3552 //
3553 // Note that any of the below calls may panic, but it would still be
3554 // sound even if it did. `is_bit_valid` does not promise that it will
3555 // not panic (in fact, it explicitly warns that it's a possibility), and
3556 // we have not violated any safety invariants that we must fix before
3557 // returning.
3558 c.iter().all(|elem|
3559 // SAFETY: We uphold the safety contract of `is_bit_valid(elem)`, by
3560 // precondition on the surrounding call to `is_bit_valid`. The
3561 // memory referenced by `elem` is contained entirely within `c`, and
3562 // satisfies the preconditions satisfied by `c`. By axiom, we assume
        // that `Iterator::all` does not invalidate these preconditions
3564 // (e.g., by writing to `elem`.) Since `elem` is derived from `c`,
3565 // it is only possible for uninitialized bytes to occur in `elem` at
3566 // the same bytes they occur within `c`.
3567 unsafe { <T as TryFromBytes>::is_bit_valid(elem) }
3568 )
3569 });
3570 unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
3571 unsafe_impl!(T: FromBytes => FromBytes for [T]);
3572 unsafe_impl!(T: AsBytes => AsBytes for [T]);
3573 unsafe_impl!(T: Unaligned => Unaligned for [T]);
3574}
3575safety_comment! {
3576 /// SAFETY:
3577 /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero
3578 /// pointer is considered "null". [1] No operations which require
3579 /// provenance are legal on null pointers, so this is not a footgun.
3580 ///
3581 /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers
3582 /// would be sound, but carries provenance footguns. We want to support
3583 /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are
3584 /// holding off until we can figure out how to address those footguns.
3585 ///
3586 /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the
3587 /// documentation once this PR lands.
3588 unsafe_impl!(T => FromZeroes for *const T);
3589 unsafe_impl!(T => FromZeroes for *mut T);
3590}
3591
3592// SIMD support
3593//
3594// Per the Unsafe Code Guidelines Reference [1]:
3595//
3596// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs
3597// containing `N` elements of type `T` where `N` is a power-of-two and the
3598// size and alignment requirements of `T` are equal:
3599//
3600// ```rust
3601// #[repr(simd)]
3602// struct Vector<T, N>(T_0, ..., T_(N - 1));
3603// ```
3604//
3605// ...
3606//
3607// The size of `Vector` is `N * size_of::<T>()` and its alignment is an
3608// implementation-defined function of `T` and `N` greater than or equal to
3609// `align_of::<T>()`.
3610//
3611// ...
3612//
3613// Vector elements are laid out in source field order, enabling random access
3614// to vector elements by reinterpreting the vector as an array:
3615//
3616// ```rust
3617// union U {
3618// vec: Vector<T, N>,
3619// arr: [T; N]
3620// }
3621//
3622// assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>());
3623// assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>());
3624//
3625// unsafe {
3626// let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) };
3627//
3628// assert_eq!(u.vec.0, u.arr[0]);
3629// // ...
3630// assert_eq!(u.vec.(N - 1), u.arr[N - 1]);
3631// }
3632// ```
3633//
3634// Given this background, we can observe that:
3635// - The size and bit pattern requirements of a SIMD type are equivalent to the
3636// equivalent array type. Thus, for any SIMD type whose primitive `T` is
3637// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is
3638// also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` respectively.
3639// - Since no upper bound is placed on the alignment, no SIMD type can be
3640// guaranteed to be `Unaligned`.
3641//
3642// Also per [1]:
3643//
3644// This chapter represents the consensus from issue #38. The statements in
3645// here are not (yet) "guaranteed" not to change until an RFC ratifies them.
3646//
3647// See issue #38 [2]. While this behavior is not technically guaranteed, the
3648// likelihood that the behavior will change such that SIMD types are no longer
3649// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as
3650// that would defeat the entire purpose of SIMD types. Nonetheless, we put this
3651// behavior behind the `simd` Cargo feature, which requires consumers to opt
3652// into this stability hazard.
3653//
3654// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
3655// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
3656#[cfg(feature = "simd")]
3657#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
3658mod simd {
3659 /// Defines a module which implements `TryFromBytes`, `FromZeroes`,
3660 /// `FromBytes`, and `AsBytes` for a set of types from a module in
3661 /// `core::arch`.
3662 ///
    /// `$arch` is the name of the module in `core::arch`, `$mod` is the name of
    /// the module defined here, and `$typ` is the list of items from the
    /// `core::arch` module to implement `TryFromBytes`, `FromZeroes`,
    /// `FromBytes`, and `AsBytes` for.
3666 #[allow(unused_macros)] // `allow(unused_macros)` is needed because some
3667 // target/feature combinations don't emit any impls
3668 // and thus don't use this macro.
3669 macro_rules! simd_arch_mod {
3670 (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => {
3671 #[cfg $cfg]
3672 #[cfg_attr(doc_cfg, doc(cfg $cfg))]
3673 mod $mod {
3674 use core::arch::$arch::{$($typ),*};
3675
3676 use crate::*;
3677 impl_known_layout!($($typ),*);
3678 safety_comment! {
3679 /// SAFETY:
3680 /// See comment on module definition for justification.
3681 $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )*
3682 }
3683 }
3684 };
3685 }
3686
3687 #[rustfmt::skip]
3688 const _: () = {
3689 simd_arch_mod!(
3690 #[cfg(target_arch = "x86")]
3691 x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
3692 );
3693 simd_arch_mod!(
3694 #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
3695 x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
3696 );
3697 simd_arch_mod!(
3698 #[cfg(target_arch = "x86_64")]
3699 x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
3700 );
3701 simd_arch_mod!(
3702 #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
3703 x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
3704 );
3705 simd_arch_mod!(
3706 #[cfg(target_arch = "wasm32")]
3707 wasm32, wasm32, v128
3708 );
3709 simd_arch_mod!(
3710 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
3711 powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
3712 );
3713 simd_arch_mod!(
3714 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
3715 powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
3716 );
3717 simd_arch_mod!(
3718 #[cfg(target_arch = "aarch64")]
3719 aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
3720 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
3721 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
3722 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
3723 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
3724 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
3725 uint64x1_t, uint64x2_t
3726 );
3727 simd_arch_mod!(
3728 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
3729 arm, arm, int8x4_t, uint8x4_t
3730 );
3731 };
3732}
3733
3734/// Safely transmutes a value of one type to a value of another type of the same
3735/// size.
3736///
3737/// The expression `$e` must have a concrete type, `T`, which implements
3738/// `AsBytes`. The `transmute!` expression must also have a concrete type, `U`
3739/// (`U` is inferred from the calling context), and `U` must implement
3740/// `FromBytes`.
3741///
3742/// Note that the `T` produced by the expression `$e` will *not* be dropped.
3743/// Semantically, its bits will be copied into a new value of type `U`, the
3744/// original `T` will be forgotten, and the value of type `U` will be returned.
3745///
3746/// # Examples
3747///
3748/// ```
3749/// # use zerocopy::transmute;
3750/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3751///
3752/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
3753///
3754/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
3755/// ```
3756#[macro_export]
3757macro_rules! transmute {
3758 ($e:expr) => {{
3759 // NOTE: This must be a macro (rather than a function with trait bounds)
3760 // because there's no way, in a generic context, to enforce that two
3761 // types have the same size. `core::mem::transmute` uses compiler magic
3762 // to enforce this so long as the types are concrete.
3763
3764 let e = $e;
3765 if false {
3766 // This branch, though never taken, ensures that the type of `e` is
3767 // `AsBytes` and that the type of this macro invocation expression
3768 // is `FromBytes`.
3769
3770 struct AssertIsAsBytes<T: $crate::AsBytes>(T);
3771 let _ = AssertIsAsBytes(e);
3772
3773 struct AssertIsFromBytes<U: $crate::FromBytes>(U);
3774 #[allow(unused, unreachable_code)]
3775 let u = AssertIsFromBytes(loop {});
3776 u.0
3777 } else {
3778 // SAFETY: `core::mem::transmute` ensures that the type of `e` and
3779 // the type of this macro invocation expression have the same size.
3780 // We know this transmute is safe thanks to the `AsBytes` and
3781 // `FromBytes` bounds enforced by the `false` branch.
3782 //
3783 // We use this reexport of `core::mem::transmute` because we know it
3784 // will always be available for crates which are using the 2015
3785 // edition of Rust. By contrast, if we were to use
3786 // `std::mem::transmute`, this macro would not work for such crates
3787 // in `no_std` contexts, and if we were to use
3788 // `core::mem::transmute`, this macro would not work in `std`
3789 // contexts in which `core` was not manually imported. This is not a
3790 // problem for 2018 edition crates.
3791 unsafe {
3792 // Clippy: It's okay to transmute a type to itself.
3793 #[allow(clippy::useless_transmute)]
3794 $crate::macro_util::core_reexport::mem::transmute(e)
3795 }
3796 }
3797 }}
3798}
3799
3800/// Safely transmutes a mutable or immutable reference of one type to an
3801/// immutable reference of another type of the same size.
3802///
3803/// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T:
3804/// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete
3805/// type, `&U` (`U` is inferred from the calling context), where `U: Sized +
3806/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
3807///
3808/// The lifetime of the input type, `&T` or `&mut T`, must be the same as or
3809/// outlive the lifetime of the output type, `&U`.
3810///
3811/// # Examples
3812///
3813/// ```
3814/// # use zerocopy::transmute_ref;
3815/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3816///
3817/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional);
3818///
3819/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
3820/// ```
3821///
3822/// # Alignment increase error message
3823///
3824/// Because of limitations on macros, the error message generated when
3825/// `transmute_ref!` is used to transmute from a type of lower alignment to a
3826/// type of higher alignment is somewhat confusing. For example, the following
3827/// code:
3828///
3829/// ```compile_fail
3830/// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
3831/// ```
3832///
3833/// ...generates the following error:
3834///
3835/// ```text
3836/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
3837/// --> src/lib.rs:1524:34
3838/// |
3839/// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
3840/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
3841/// |
3842/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
3843/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
3844/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info)
3845/// ```
3846///
3847/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
3848/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
3849/// align_of::<U>()`.
3850#[macro_export]
3851macro_rules! transmute_ref {
3852 ($e:expr) => {{
3853 // NOTE: This must be a macro (rather than a function with trait bounds)
3854 // because there's no way, in a generic context, to enforce that two
3855 // types have the same size or alignment.
3856
3857 // Ensure that the source type is a reference or a mutable reference
3858 // (note that mutable references are implicitly reborrowed here).
3859 let e: &_ = $e;
3860
3861 #[allow(unused, clippy::diverging_sub_expression)]
3862 if false {
3863 // This branch, though never taken, ensures that the type of `e` is
3864 // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro
3865 // expression is `&U` where `U: 'u + Sized + FromBytes`, and that
3866 // `'t` outlives `'u`.
3867
3868 struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3869 let _ = AssertIsAsBytes(e);
3870
3871 struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U);
3872 #[allow(unused, unreachable_code)]
3873 let u = AssertIsFromBytes(loop {});
3874 u.0
3875 } else if false {
            // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >=
            // align_of::<U>()`.
3879
3880 // `t` is inferred to have type `T` because it's assigned to `e` (of
3881 // type `&T`) as `&t`.
3882 let mut t = unreachable!();
3883 e = &t;
3884
3885 // `u` is inferred to have type `U` because it's used as `&u` as the
3886 // value returned from this branch.
3887 let u;
3888
3889 $crate::assert_size_eq!(t, u);
3890 $crate::assert_align_gt_eq!(t, u);
3891
3892 &u
3893 } else {
3894 // SAFETY: For source type `Src` and destination type `Dst`:
3895 // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the
3896 // uses of `AssertIsAsBytes` and `AssertIsFromBytes` above.
3897 // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
3898 // the use of `assert_size_eq!` above.
3899 // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
3900 // the use of `assert_align_gt_eq!` above.
3901 unsafe { $crate::macro_util::transmute_ref(e) }
3902 }
3903 }}
3904}
3905
/// Safely transmutes a mutable reference of one type to a mutable reference of
/// another type of the same size.
3908///
/// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized +
/// FromBytes + AsBytes`. The `transmute_mut!` expression must also have a
/// concrete type, `&mut U` (`U` is inferred from the calling context), where
/// `U: Sized + FromBytes + AsBytes`. It must be the case that `align_of::<T>()
/// >= align_of::<U>()`.
3913///
3914/// The lifetime of the input type, `&mut T`, must be the same as or outlive the
3915/// lifetime of the output type, `&mut U`.
3916///
3917/// # Examples
3918///
3919/// ```
3920/// # use zerocopy::transmute_mut;
3921/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
3922///
3923/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional);
3924///
3925/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
3926///
3927/// two_dimensional.reverse();
3928///
3929/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]);
3930/// ```
3931///
3932/// # Alignment increase error message
3933///
3934/// Because of limitations on macros, the error message generated when
3935/// `transmute_mut!` is used to transmute from a type of lower alignment to a
3936/// type of higher alignment is somewhat confusing. For example, the following
3937/// code:
3938///
3939/// ```compile_fail
3940/// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
3941/// ```
3942///
3943/// ...generates the following error:
3944///
3945/// ```text
3946/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
3947/// --> src/lib.rs:1524:34
3948/// |
3949/// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
3950/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
3951/// |
3952/// = note: source type: `AlignOf<[u8; 2]>` (8 bits)
3953/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
3954/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info)
3955/// ```
3956///
3957/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
3958/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
3959/// align_of::<U>()`.
3960#[macro_export]
3961macro_rules! transmute_mut {
3962 ($e:expr) => {{
3963 // NOTE: This must be a macro (rather than a function with trait bounds)
3964 // because there's no way, in a generic context, to enforce that two
3965 // types have the same size or alignment.
3966
3967 // Ensure that the source type is a mutable reference.
3968 let e: &mut _ = $e;
3969
3970 #[allow(unused, clippy::diverging_sub_expression)]
3971 if false {
            // This branch, though never taken, ensures that the type of `e` is
            // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, and that
            // the type of this macro expression is `&mut U` where `U: 'u +
            // Sized + FromBytes + AsBytes`.
3976
3977 // We use immutable references here rather than mutable so that, if
3978 // this macro is used in a const context (in which, as of this
3979 // writing, mutable references are banned), the error message
3980 // appears to originate in the user's code rather than in the
3981 // internals of this macro.
3982 struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
3983 struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3984 struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
3985 struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
3986
3987 if true {
3988 let _ = AssertSrcIsFromBytes(&*e);
3989 } else {
3990 let _ = AssertSrcIsAsBytes(&*e);
3991 }
3992
3993 if true {
3994 #[allow(unused, unreachable_code)]
3995 let u = AssertDstIsFromBytes(loop {});
3996 &mut *u.0
3997 } else {
3998 #[allow(unused, unreachable_code)]
3999 let u = AssertDstIsAsBytes(loop {});
4000 &mut *u.0
4001 }
4002 } else if false {
            // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >=
            // align_of::<U>()`.
4006
4007 // `t` is inferred to have type `T` because it's assigned to `e` (of
4008 // type `&mut T`) as `&mut t`.
4009 let mut t = unreachable!();
4010 e = &mut t;
4011
4012 // `u` is inferred to have type `U` because it's used as `&mut u` as
4013 // the value returned from this branch.
4014 let u;
4015
4016 $crate::assert_size_eq!(t, u);
4017 $crate::assert_align_gt_eq!(t, u);
4018
4019 &mut u
4020 } else {
4021 // SAFETY: For source type `Src` and destination type `Dst`:
4022 // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes +
4023 // AsBytes` thanks to the uses of `AssertSrcIsFromBytes`,
4024 // `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and
4025 // `AssertDstIsAsBytes` above.
4026 // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
4027 // the use of `assert_size_eq!` above.
4028 // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
4029 // the use of `assert_align_gt_eq!` above.
4030 unsafe { $crate::macro_util::transmute_mut(e) }
4031 }
4032 }}
4033}
4034
4035/// Includes a file and safely transmutes it to a value of an arbitrary type.
4036///
4037/// The file will be included as a byte array, `[u8; N]`, which will be
4038/// transmuted to another type, `T`. `T` is inferred from the calling context,
4039/// and must implement [`FromBytes`].
4040///
4041/// The file is located relative to the current file (similarly to how modules
4042/// are found). The provided path is interpreted in a platform-specific way at
4043/// compile time. So, for instance, an invocation with a Windows path containing
4044/// backslashes `\` would not compile correctly on Unix.
4045///
4046/// `include_value!` is ignorant of byte order. For byte order-aware types, see
4047/// the [`byteorder`] module.
4048///
4049/// # Examples
4050///
4051/// Assume there are two files in the same directory with the following
4052/// contents:
4053///
4054/// File `data` (no trailing newline):
4055///
4056/// ```text
4057/// abcd
4058/// ```
4059///
4060/// File `main.rs`:
4061///
4062/// ```rust
4063/// use zerocopy::include_value;
4064/// # macro_rules! include_value {
4065/// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) };
4066/// # }
4067///
4068/// fn main() {
4069/// let as_u32: u32 = include_value!("data");
4070/// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
4071/// let as_i32: i32 = include_value!("data");
4072/// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
4073/// }
4074/// ```
4075#[doc(alias("include_bytes", "include_data", "include_type"))]
4076#[macro_export]
4077macro_rules! include_value {
4078 ($file:expr $(,)?) => {
4079 $crate::transmute!(*::core::include_bytes!($file))
4080 };
4081}
4082
4083/// A typed reference derived from a byte slice.
4084///
4085/// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`.
4086/// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same
4087/// mutability as the byte slice it was constructed from (`B`).
4088///
4089/// # Examples
4090///
4091/// `Ref` can be used to treat a sequence of bytes as a structured type, and to
4092/// read and write the fields of that type as if the byte slice reference were
4093/// simply a reference to that type.
4094///
4095/// ```rust
4096/// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
4097/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned};
4098///
4099/// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
4100/// #[repr(C)]
4101/// struct UdpHeader {
4102/// src_port: [u8; 2],
4103/// dst_port: [u8; 2],
4104/// length: [u8; 2],
4105/// checksum: [u8; 2],
4106/// }
4107///
4108/// struct UdpPacket<B> {
4109/// header: Ref<B, UdpHeader>,
4110/// body: B,
4111/// }
4112///
4113/// impl<B: ByteSlice> UdpPacket<B> {
4114/// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
4115/// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?;
4116/// Some(UdpPacket { header, body })
4117/// }
4118///
4119/// pub fn get_src_port(&self) -> [u8; 2] {
4120/// self.header.src_port
4121/// }
4122/// }
4123///
4124/// impl<B: ByteSliceMut> UdpPacket<B> {
4125/// pub fn set_src_port(&mut self, src_port: [u8; 2]) {
4126/// self.header.src_port = src_port;
4127/// }
4128/// }
4129/// # }
4130/// ```
4131pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
4132
4133/// Deprecated: prefer [`Ref`] instead.
4134#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
4135#[doc(hidden)]
4136pub type LayoutVerified<B, T> = Ref<B, T>;
4137
4138impl<B, T> Ref<B, T>
4139where
4140 B: ByteSlice,
4141{
4142 /// Constructs a new `Ref`.
4143 ///
4144 /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
4145 /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of
    /// these checks fails, it returns `None`.
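    ///
    /// # Examples
    ///
    /// A minimal sketch; `[u8; 4]` stands in for any `FromBytes` type:
    ///
    /// ```
    /// use zerocopy::Ref;
    ///
    /// let bytes = &[0u8, 1, 2, 3][..];
    /// let r = Ref::<_, [u8; 4]>::new(bytes).unwrap();
    /// assert_eq!(*r, [0, 1, 2, 3]);
    /// ```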
4147 #[inline]
4148 pub fn new(bytes: B) -> Option<Ref<B, T>> {
4149 if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4150 return None;
4151 }
4152 Some(Ref(bytes, PhantomData))
4153 }
4154
4155 /// Constructs a new `Ref` from the prefix of a byte slice.
4156 ///
4157 /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
4158 /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
4159 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4160 /// the remaining bytes to the caller. If either the length or alignment
    /// check fails, it returns `None`.
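    ///
    /// # Examples
    ///
    /// A minimal sketch; the `[u8; 2]` target type is illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4, 5];
    /// let (r, rest) = Ref::<_, [u8; 2]>::new_from_prefix(&bytes[..]).unwrap();
    /// assert_eq!(*r, [1, 2]);
    /// assert_eq!(rest, &[3u8, 4, 5][..]);
    /// ```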
4162 #[inline]
4163 pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4164 if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4165 return None;
4166 }
4167 let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
4168 Some((Ref(bytes, PhantomData), suffix))
4169 }
4170
4171 /// Constructs a new `Ref` from the suffix of a byte slice.
4172 ///
4173 /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
4174 /// the last `size_of::<T>()` bytes of `bytes` are aligned to
4175 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4176 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
    /// caller. If either the length or alignment check fails, it returns
4178 /// `None`.
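    ///
    /// # Examples
    ///
    /// A minimal sketch; the `[u8; 2]` target type is illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4, 5];
    /// let (rest, r) = Ref::<_, [u8; 2]>::new_from_suffix(&bytes[..]).unwrap();
    /// assert_eq!(rest, &[1u8, 2, 3][..]);
    /// assert_eq!(*r, [4, 5]);
    /// ```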
4179 #[inline]
4180 pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4181 let bytes_len = bytes.len();
4182 let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
4183 let (prefix, bytes) = bytes.split_at(split_at);
4184 if !util::aligned_to::<_, T>(bytes.deref()) {
4185 return None;
4186 }
4187 Some((prefix, Ref(bytes, PhantomData)))
4188 }
4189}
4190
4191impl<B, T> Ref<B, [T]>
4192where
4193 B: ByteSlice,
4194{
4195 /// Constructs a new `Ref` of a slice type.
4196 ///
4197 /// `new_slice` verifies that `bytes.len()` is a multiple of
4198 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
    /// constructs a new `Ref`. If either of these checks fails, it returns
4200 /// `None`.
4201 ///
4202 /// # Panics
4203 ///
4204 /// `new_slice` panics if `T` is a zero-sized type.
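    ///
    /// # Examples
    ///
    /// A minimal sketch; `[u8; 2]` elements have alignment 1, so only the
    /// length check can fail here:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4, 5, 6];
    /// let r = Ref::<_, [[u8; 2]]>::new_slice(&bytes[..]).unwrap();
    /// assert_eq!(r.len(), 3);
    /// assert_eq!(r[0], [1, 2]);
    /// ```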
4205 #[inline]
4206 pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
4207 let remainder = bytes
4208 .len()
4209 .checked_rem(mem::size_of::<T>())
4210 .expect("Ref::new_slice called on a zero-sized type");
4211 if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
4212 return None;
4213 }
4214 Some(Ref(bytes, PhantomData))
4215 }
4216
4217 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice.
4218 ///
4219 /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
4220 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
4221 /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
4222 /// and returns the remaining bytes to the caller. It also ensures that
    /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4224 /// length, alignment, or overflow checks fail, it returns `None`.
4225 ///
4226 /// # Panics
4227 ///
4228 /// `new_slice_from_prefix` panics if `T` is a zero-sized type.
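    ///
    /// # Examples
    ///
    /// A minimal sketch; the `[u8; 2]` element type is illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4, 5];
    /// let (r, rest) = Ref::<_, [[u8; 2]]>::new_slice_from_prefix(&bytes[..], 2).unwrap();
    /// assert_eq!(r[0], [1, 2]);
    /// assert_eq!(r[1], [3, 4]);
    /// assert_eq!(rest, &[5u8][..]);
    /// ```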
4229 #[inline]
4230 pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4231 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4232 Some(len) => len,
4233 None => return None,
4234 };
4235 if bytes.len() < expected_len {
4236 return None;
4237 }
4238 let (prefix, bytes) = bytes.split_at(expected_len);
4239 Self::new_slice(prefix).map(move |l| (l, bytes))
4240 }
4241
4242 /// Constructs a new `Ref` of a slice type from the suffix of a byte slice.
4243 ///
4244 /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
4245 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
4246 /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
4247 /// and returns the preceding bytes to the caller. It also ensures that
    /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4249 /// length, alignment, or overflow checks fail, it returns `None`.
4250 ///
4251 /// # Panics
4252 ///
4253 /// `new_slice_from_suffix` panics if `T` is a zero-sized type.
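    ///
    /// # Examples
    ///
    /// A minimal sketch; the `[u8; 2]` element type is illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4, 5];
    /// let (rest, r) = Ref::<_, [[u8; 2]]>::new_slice_from_suffix(&bytes[..], 2).unwrap();
    /// assert_eq!(rest, &[1u8][..]);
    /// assert_eq!(r[0], [2, 3]);
    /// assert_eq!(r[1], [4, 5]);
    /// ```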
4254 #[inline]
4255 pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4256 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4257 Some(len) => len,
4258 None => return None,
4259 };
4260 let split_at = bytes.len().checked_sub(expected_len)?;
4261 let (bytes, suffix) = bytes.split_at(split_at);
4262 Self::new_slice(suffix).map(move |l| (bytes, l))
4263 }
4264}
4265
4266fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
4267 match opt {
        Some(mut r) => {
4269 r.0.fill(0);
4270 Some(r)
4271 }
4272 None => None,
4273 }
4274}
4275
4276fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4277 opt: Option<(Ref<B, T>, B)>,
4278) -> Option<(Ref<B, T>, B)> {
4279 match opt {
        Some((mut r, rest)) => {
4281 r.0.fill(0);
4282 Some((r, rest))
4283 }
4284 None => None,
4285 }
4286}
4287
4288fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4289 opt: Option<(B, Ref<B, T>)>,
4290) -> Option<(B, Ref<B, T>)> {
    map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
4292}
4293
4294impl<B, T> Ref<B, T>
4295where
4296 B: ByteSliceMut,
4297{
4298 /// Constructs a new `Ref` after zeroing the bytes.
4299 ///
4300 /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
4301 /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If
    /// either of these checks fails, it returns `None`.
4303 ///
4304 /// If the checks succeed, then `bytes` will be initialized to zero. This
4305 /// can be useful when re-using buffers to ensure that sensitive data
4306 /// previously stored in the buffer is not leaked.
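    ///
    /// # Examples
    ///
    /// A short sketch of the zeroing behavior; the `[u8; 4]` target type is
    /// illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let mut bytes = [0xffu8; 4];
    /// let r = Ref::<_, [u8; 4]>::new_zeroed(&mut bytes[..]).unwrap();
    /// assert_eq!(*r, [0, 0, 0, 0]);
    /// ```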
4307 #[inline(always)]
4308 pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
4309 map_zeroed(Self::new(bytes))
4310 }
4311
4312 /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the
4313 /// prefix.
4314 ///
4315 /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4316 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
4317 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4318 /// the remaining bytes to the caller. If either the length or alignment
    /// check fails, it returns `None`.
4320 ///
4321 /// If the checks succeed, then the prefix which is consumed will be
4322 /// initialized to zero. This can be useful when re-using buffers to ensure
4323 /// that sensitive data previously stored in the buffer is not leaked.
4324 #[inline(always)]
4325 pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4326 map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
4327 }
4328
4329 /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the
4330 /// suffix.
4331 ///
4332 /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4333 /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
4334 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4335 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
    /// caller. If either the length or alignment check fails, it returns
4337 /// `None`.
4338 ///
4339 /// If the checks succeed, then the suffix which is consumed will be
4340 /// initialized to zero. This can be useful when re-using buffers to ensure
4341 /// that sensitive data previously stored in the buffer is not leaked.
4342 #[inline(always)]
4343 pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4344 map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
4345 }
4346}
4347
4348impl<B, T> Ref<B, [T]>
4349where
4350 B: ByteSliceMut,
4351{
4352 /// Constructs a new `Ref` of a slice type after zeroing the bytes.
4353 ///
4354 /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
4355 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
    /// constructs a new `Ref`. If either of these checks fails, it returns
4357 /// `None`.
4358 ///
4359 /// If the checks succeed, then `bytes` will be initialized to zero. This
4360 /// can be useful when re-using buffers to ensure that sensitive data
4361 /// previously stored in the buffer is not leaked.
4362 ///
4363 /// # Panics
4364 ///
    /// `new_slice_zeroed` panics if `T` is a zero-sized type.
4366 #[inline(always)]
4367 pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4368 map_zeroed(Self::new_slice(bytes))
4369 }
4370
4371 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice,
4372 /// after zeroing the bytes.
4373 ///
    /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count` and that `bytes` is aligned to
    /// `align_of::<T>()`. It consumes the first `size_of::<T>() * count` bytes
    /// from `bytes` to construct a `Ref`, and returns the remaining bytes to
    /// the caller. It also ensures that `size_of::<T>() * count` does not
    /// overflow a `usize`. If any of the length, alignment, or overflow checks
    /// fail, it returns `None`.
    ///
    /// If the checks succeed, then the prefix which is consumed will be
4382 /// initialized to zero. This can be useful when re-using buffers to ensure
4383 /// that sensitive data previously stored in the buffer is not leaked.
4384 ///
4385 /// # Panics
4386 ///
4387 /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
4388 #[inline(always)]
4389 pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4390 map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
4391 }
4392
    /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
4394 /// after zeroing the bytes.
4395 ///
    /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count` and that `bytes` is aligned to
    /// `align_of::<T>()`. It consumes the last `size_of::<T>() * count` bytes
    /// from `bytes` to construct a `Ref`, and returns the preceding bytes to
    /// the caller. It also ensures that `size_of::<T>() * count` does not
    /// overflow a `usize`. If any of the
4401 /// length, alignment, or overflow checks fail, it returns `None`.
4402 ///
4403 /// If the checks succeed, then the consumed suffix will be initialized to
4404 /// zero. This can be useful when re-using buffers to ensure that sensitive
4405 /// data previously stored in the buffer is not leaked.
4406 ///
4407 /// # Panics
4408 ///
4409 /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
4410 #[inline(always)]
4411 pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4412 map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
4413 }
4414}
4415
4416impl<B, T> Ref<B, T>
4417where
4418 B: ByteSlice,
4419 T: Unaligned,
4420{
4421 /// Constructs a new `Ref` for a type with no alignment requirement.
4422 ///
4423 /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
4424 /// constructs a new `Ref`. If the check fails, it returns `None`.
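    ///
    /// # Examples
    ///
    /// A minimal sketch; `[u8; 2]` implements `Unaligned`, so no alignment
    /// check is needed:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2];
    /// let r = Ref::<_, [u8; 2]>::new_unaligned(&bytes[..]).unwrap();
    /// assert_eq!(*r, [1, 2]);
    /// ```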
4425 #[inline(always)]
4426 pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
4427 Ref::new(bytes)
4428 }
4429
4430 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4431 /// no alignment requirement.
4432 ///
4433 /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
4434 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4435 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4436 /// caller. If the length check fails, it returns `None`.
4437 #[inline(always)]
4438 pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4439 Ref::new_from_prefix(bytes)
4440 }
4441
4442 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4443 /// no alignment requirement.
4444 ///
4445 /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
4446 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4447 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4448 /// caller. If the length check fails, it returns `None`.
4449 #[inline(always)]
4450 pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4451 Ref::new_from_suffix(bytes)
4452 }
4453}
4454
4455impl<B, T> Ref<B, [T]>
4456where
4457 B: ByteSlice,
4458 T: Unaligned,
4459{
4460 /// Constructs a new `Ref` of a slice type with no alignment requirement.
4461 ///
4462 /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
4463 /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4464 /// returns `None`.
4465 ///
4466 /// # Panics
4467 ///
    /// `new_slice_unaligned` panics if `T` is a zero-sized type.
4469 #[inline(always)]
4470 pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
4471 Ref::new_slice(bytes)
4472 }
4473
4474 /// Constructs a new `Ref` of a slice type with no alignment requirement
4475 /// from the prefix of a byte slice.
4476 ///
    /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the remaining
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4483 ///
4484 /// # Panics
4485 ///
4486 /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
4487 #[inline(always)]
4488 pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4489 Ref::new_slice_from_prefix(bytes, count)
4490 }
4491
4492 /// Constructs a new `Ref` of a slice type with no alignment requirement
4493 /// from the suffix of a byte slice.
4494 ///
    /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the preceding
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4500 ///
4501 /// # Panics
4502 ///
4503 /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
4504 #[inline(always)]
4505 pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4506 Ref::new_slice_from_suffix(bytes, count)
4507 }
4508}
4509
4510impl<B, T> Ref<B, T>
4511where
4512 B: ByteSliceMut,
4513 T: Unaligned,
4514{
4515 /// Constructs a new `Ref` for a type with no alignment requirement, zeroing
4516 /// the bytes.
4517 ///
4518 /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
4519 /// constructs a new `Ref`. If the check fails, it returns `None`.
4520 ///
4521 /// If the check succeeds, then `bytes` will be initialized to zero. This
4522 /// can be useful when re-using buffers to ensure that sensitive data
4523 /// previously stored in the buffer is not leaked.
4524 #[inline(always)]
4525 pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
4526 map_zeroed(Self::new_unaligned(bytes))
4527 }
4528
4529 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4530 /// no alignment requirement, zeroing the prefix.
4531 ///
4532 /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
4533 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4534 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4535 /// caller. If the length check fails, it returns `None`.
4536 ///
4537 /// If the check succeeds, then the prefix which is consumed will be
4538 /// initialized to zero. This can be useful when re-using buffers to ensure
4539 /// that sensitive data previously stored in the buffer is not leaked.
4540 #[inline(always)]
4541 pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4542 map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
4543 }
4544
4545 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4546 /// no alignment requirement, zeroing the suffix.
4547 ///
4548 /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
4549 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4550 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4551 /// caller. If the length check fails, it returns `None`.
4552 ///
4553 /// If the check succeeds, then the suffix which is consumed will be
4554 /// initialized to zero. This can be useful when re-using buffers to ensure
4555 /// that sensitive data previously stored in the buffer is not leaked.
4556 #[inline(always)]
4557 pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4558 map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
4559 }
4560}
4561
4562impl<B, T> Ref<B, [T]>
4563where
4564 B: ByteSliceMut,
4565 T: Unaligned,
4566{
4567 /// Constructs a new `Ref` for a slice type with no alignment requirement,
4568 /// zeroing the bytes.
4569 ///
4570 /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
4571 /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4572 /// returns `None`.
4573 ///
4574 /// If the check succeeds, then `bytes` will be initialized to zero. This
4575 /// can be useful when re-using buffers to ensure that sensitive data
4576 /// previously stored in the buffer is not leaked.
4577 ///
4578 /// # Panics
4579 ///
    /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
4581 #[inline(always)]
4582 pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4583 map_zeroed(Self::new_slice_unaligned(bytes))
4584 }
4585
4586 /// Constructs a new `Ref` of a slice type with no alignment requirement
4587 /// from the prefix of a byte slice, after zeroing the bytes.
4588 ///
    /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the remaining
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4595 ///
4596 /// If the checks succeed, then the prefix will be initialized to zero. This
4597 /// can be useful when re-using buffers to ensure that sensitive data
4598 /// previously stored in the buffer is not leaked.
4599 ///
4600 /// # Panics
4601 ///
4602 /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
4603 /// type.
4604 #[inline(always)]
4605 pub fn new_slice_unaligned_from_prefix_zeroed(
4606 bytes: B,
4607 count: usize,
4608 ) -> Option<(Ref<B, [T]>, B)> {
4609 map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
4610 }
4611
4612 /// Constructs a new `Ref` of a slice type with no alignment requirement
4613 /// from the suffix of a byte slice, after zeroing the bytes.
4614 ///
    /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
    /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
    /// bytes from `bytes` to construct a `Ref`, and returns the preceding
    /// bytes to the caller. It also ensures that `size_of::<T>() * count` does
    /// not overflow a `usize`. If either the length or overflow check fails,
    /// it returns `None`.
4620 ///
4621 /// If the checks succeed, then the suffix will be initialized to zero. This
4622 /// can be useful when re-using buffers to ensure that sensitive data
4623 /// previously stored in the buffer is not leaked.
4624 ///
4625 /// # Panics
4626 ///
4627 /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
4628 /// type.
4629 #[inline(always)]
4630 pub fn new_slice_unaligned_from_suffix_zeroed(
4631 bytes: B,
4632 count: usize,
4633 ) -> Option<(B, Ref<B, [T]>)> {
4634 map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
4635 }
4636}
4637
4638impl<'a, B, T> Ref<B, T>
4639where
4640 B: 'a + ByteSlice,
4641 T: FromBytes,
4642{
4643 /// Converts this `Ref` into a reference.
4644 ///
4645 /// `into_ref` consumes the `Ref`, and returns a reference to `T`.
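    ///
    /// # Examples
    ///
    /// A minimal sketch; `&[u8]` is a byte slice type for which this method is
    /// sound:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4];
    /// let r = Ref::<&[u8], [u8; 4]>::new(&bytes[..]).unwrap();
    /// let four: &[u8; 4] = r.into_ref();
    /// assert_eq!(four, &[1, 2, 3, 4]);
    /// ```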
4646 #[inline(always)]
4647 pub fn into_ref(self) -> &'a T {
4648 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4649
4650 // SAFETY: According to the safety preconditions on
4651 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4652 // ensures that, given `B: 'a`, it is sound to drop `self` and still
4653 // access the underlying memory using reads for `'a`.
4654 unsafe { self.deref_helper() }
4655 }
4656}
4657
4658impl<'a, B, T> Ref<B, T>
4659where
4660 B: 'a + ByteSliceMut,
4661 T: FromBytes + AsBytes,
4662{
4663 /// Converts this `Ref` into a mutable reference.
4664 ///
4665 /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`.
4666 #[inline(always)]
4667 pub fn into_mut(mut self) -> &'a mut T {
4668 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4669
4670 // SAFETY: According to the safety preconditions on
4671 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4672 // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
4673 // `self` and still access the underlying memory using both reads and
4674 // writes for `'a`.
4675 unsafe { self.deref_mut_helper() }
4676 }
4677}
4678
4679impl<'a, B, T> Ref<B, [T]>
4680where
4681 B: 'a + ByteSlice,
4682 T: FromBytes,
4683{
4684 /// Converts this `Ref` into a slice reference.
4685 ///
4686 /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`.
4687 #[inline(always)]
4688 pub fn into_slice(self) -> &'a [T] {
4689 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4690
4691 // SAFETY: According to the safety preconditions on
4692 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4693 // ensures that, given `B: 'a`, it is sound to drop `self` and still
4694 // access the underlying memory using reads for `'a`.
4695 unsafe { self.deref_slice_helper() }
4696 }
4697}
4698
4699impl<'a, B, T> Ref<B, [T]>
4700where
4701 B: 'a + ByteSliceMut,
4702 T: FromBytes + AsBytes,
4703{
4704 /// Converts this `Ref` into a mutable slice reference.
4705 ///
4706 /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to
4707 /// `[T]`.
4708 #[inline(always)]
4709 pub fn into_mut_slice(mut self) -> &'a mut [T] {
4710 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4711
4712 // SAFETY: According to the safety preconditions on
4713 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4714 // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
4715 // `self` and still access the underlying memory using both reads and
4716 // writes for `'a`.
4717 unsafe { self.deref_mut_slice_helper() }
4718 }
4719}
4720
4721impl<B, T> Ref<B, T>
4722where
4723 B: ByteSlice,
4724 T: FromBytes,
4725{
4726 /// Creates an immutable reference to `T` with a specific lifetime.
4727 ///
4728 /// # Safety
4729 ///
4730 /// The type bounds on this method guarantee that it is safe to create an
4731 /// immutable reference to `T` from `self`. However, since the lifetime `'a`
4732 /// is not required to be shorter than the lifetime of the reference to
4733 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4734 /// this reference. In particular, the referent must exist for all of `'a`,
4735 /// and no mutable references to the same memory may be constructed during
4736 /// `'a`.
4737 unsafe fn deref_helper<'a>(&self) -> &'a T {
4738 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4739 #[allow(clippy::undocumented_unsafe_blocks)]
4740 unsafe {
4741 &*self.0.as_ptr().cast::<T>()
4742 }
4743 }
4744}
4745
4746impl<B, T> Ref<B, T>
4747where
4748 B: ByteSliceMut,
4749 T: FromBytes + AsBytes,
4750{
4751 /// Creates a mutable reference to `T` with a specific lifetime.
4752 ///
4753 /// # Safety
4754 ///
4755 /// The type bounds on this method guarantee that it is safe to create a
4756 /// mutable reference to `T` from `self`. However, since the lifetime `'a`
4757 /// is not required to be shorter than the lifetime of the reference to
4758 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4759 /// this reference. In particular, the referent must exist for all of `'a`,
4760 /// and no other references - mutable or immutable - to the same memory may
4761 /// be constructed during `'a`.
4762 unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
4763 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4764 #[allow(clippy::undocumented_unsafe_blocks)]
4765 unsafe {
4766 &mut *self.0.as_mut_ptr().cast::<T>()
4767 }
4768 }
4769}
4770
4771impl<B, T> Ref<B, [T]>
4772where
4773 B: ByteSlice,
4774 T: FromBytes,
4775{
4776 /// Creates an immutable reference to `[T]` with a specific lifetime.
4777 ///
4778 /// # Safety
4779 ///
4780 /// `deref_slice_helper` has the same safety requirements as `deref_helper`.
4781 unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
        let len = self.0.len();
        let elem_size = mem::size_of::<T>();
4784 debug_assert_ne!(elem_size, 0);
4785 // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
4786 // Thus, neither the mod nor division operations here can panic.
4787 #[allow(clippy::arithmetic_side_effects)]
4788 let elems: usize = {
4789 debug_assert_eq!(len % elem_size, 0);
4790 len / elem_size
4791 };
4792 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4793 #[allow(clippy::undocumented_unsafe_blocks)]
4794 unsafe {
            slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
4796 }
4797 }
4798}
4799
4800impl<B, T> Ref<B, [T]>
4801where
4802 B: ByteSliceMut,
4803 T: FromBytes + AsBytes,
4804{
4805 /// Creates a mutable reference to `[T]` with a specific lifetime.
4806 ///
4807 /// # Safety
4808 ///
4809 /// `deref_mut_slice_helper` has the same safety requirements as
4810 /// `deref_mut_helper`.
4811 unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
4812 let len = self.0.len();
4813 let elem_size = mem::size_of::<T>();
4814 debug_assert_ne!(elem_size, 0);
4815 // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
4816 // Thus, neither the mod nor division operations here can panic.
4817 #[allow(clippy::arithmetic_side_effects)]
4818 let elems = {
4819 debug_assert_eq!(len % elem_size, 0);
4820 len / elem_size
4821 };
4822 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4823 #[allow(clippy::undocumented_unsafe_blocks)]
4824 unsafe {
4825 slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
4826 }
4827 }
4828}
4829
4830impl<B, T> Ref<B, T>
4831where
4832 B: ByteSlice,
4833 T: ?Sized,
4834{
4835 /// Gets the underlying bytes.
4836 #[inline]
4837 pub fn bytes(&self) -> &[u8] {
4838 &self.0
4839 }
4840}
4841
4842impl<B, T> Ref<B, T>
4843where
4844 B: ByteSliceMut,
4845 T: ?Sized,
4846{
4847 /// Gets the underlying bytes mutably.
4848 #[inline]
4849 pub fn bytes_mut(&mut self) -> &mut [u8] {
4850 &mut self.0
4851 }
4852}
4853
4854impl<B, T> Ref<B, T>
4855where
4856 B: ByteSlice,
4857 T: FromBytes,
4858{
4859 /// Reads a copy of `T`.
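    ///
    /// # Examples
    ///
    /// A short sketch; the `[u8; 4]` target type is illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let bytes = [1u8, 2, 3, 4];
    /// let r = Ref::<_, [u8; 4]>::new(&bytes[..]).unwrap();
    /// assert_eq!(r.read(), [1, 2, 3, 4]);
    /// ```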
4860 #[inline]
4861 pub fn read(&self) -> T {
4862 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4863 // at least `size_of::<T>()` bytes long, and that it is at least as
4864 // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to
4865 // interpret these bytes as a `T`.
4866 unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
4867 }
4868}
4869
4870impl<B, T> Ref<B, T>
4871where
4872 B: ByteSliceMut,
4873 T: AsBytes,
4874{
4875 /// Writes the bytes of `t` and then forgets `t`.
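    ///
    /// # Examples
    ///
    /// A short sketch over a mutable byte slice; the `[u8; 4]` target type is
    /// illustrative:
    ///
    /// ```rust
    /// use zerocopy::Ref;
    ///
    /// let mut bytes = [0u8; 4];
    /// let mut r = Ref::<_, [u8; 4]>::new(&mut bytes[..]).unwrap();
    /// r.write([1, 2, 3, 4]);
    /// assert_eq!(bytes, [1, 2, 3, 4]);
    /// ```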
4876 #[inline]
4877 pub fn write(&mut self, t: T) {
4878 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4879 // at least `size_of::<T>()` bytes long, and that it is at least as
4880 // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow
4881 // all of the bytes of `t` to be accessed as a `[u8]`, but because `T:
4882 // AsBytes`, we know this is sound.
        unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
4884 }
4885}
4886
4887impl<B, T> Deref for Ref<B, T>
4888where
4889 B: ByteSlice,
4890 T: FromBytes,
4891{
4892 type Target = T;
4893 #[inline]
4894 fn deref(&self) -> &T {
4895 // SAFETY: This is sound because the lifetime of `self` is the same as
4896 // the lifetime of the return value, meaning that a) the returned
4897 // reference cannot outlive `self` and, b) no mutable methods on `self`
4898 // can be called during the lifetime of the returned reference. See the
4899 // documentation on `deref_helper` for what invariants we are required
4900 // to uphold.
4901 unsafe { self.deref_helper() }
4902 }
4903}
4904
4905impl<B, T> DerefMut for Ref<B, T>
4906where
4907 B: ByteSliceMut,
4908 T: FromBytes + AsBytes,
4909{
4910 #[inline]
4911 fn deref_mut(&mut self) -> &mut T {
4912 // SAFETY: This is sound because the lifetime of `self` is the same as
4913 // the lifetime of the return value, meaning that a) the returned
4914 // reference cannot outlive `self` and, b) no other methods on `self`
4915 // can be called during the lifetime of the returned reference. See the
4916 // documentation on `deref_mut_helper` for what invariants we are
4917 // required to uphold.
4918 unsafe { self.deref_mut_helper() }
4919 }
4920}
4921
4922impl<B, T> Deref for Ref<B, [T]>
4923where
4924 B: ByteSlice,
4925 T: FromBytes,
4926{
4927 type Target = [T];
4928 #[inline]
4929 fn deref(&self) -> &[T] {
4930 // SAFETY: This is sound because the lifetime of `self` is the same as
4931 // the lifetime of the return value, meaning that a) the returned
4932 // reference cannot outlive `self` and, b) no mutable methods on `self`
4933 // can be called during the lifetime of the returned reference. See the
4934 // documentation on `deref_slice_helper` for what invariants we are
4935 // required to uphold.
4936 unsafe { self.deref_slice_helper() }
4937 }
4938}
4939
4940impl<B, T> DerefMut for Ref<B, [T]>
4941where
4942 B: ByteSliceMut,
4943 T: FromBytes + AsBytes,
4944{
4945 #[inline]
4946 fn deref_mut(&mut self) -> &mut [T] {
4947 // SAFETY: This is sound because the lifetime of `self` is the same as
4948 // the lifetime of the return value, meaning that a) the returned
4949 // reference cannot outlive `self` and, b) no other methods on `self`
4950 // can be called during the lifetime of the returned reference. See the
4951 // documentation on `deref_mut_slice_helper` for what invariants we are
4952 // required to uphold.
4953 unsafe { self.deref_mut_slice_helper() }
4954 }
4955}
4956
4957impl<T, B> Display for Ref<B, T>
4958where
4959 B: ByteSlice,
4960 T: FromBytes + Display,
4961{
4962 #[inline]
4963 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4964 let inner: &T = self;
4965 inner.fmt(fmt)
4966 }
4967}
4968
4969impl<T, B> Display for Ref<B, [T]>
4970where
4971 B: ByteSlice,
4972 T: FromBytes,
4973 [T]: Display,
4974{
4975 #[inline]
4976 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4977 let inner: &[T] = self;
4978 inner.fmt(fmt)
4979 }
4980}
4981
4982impl<T, B> Debug for Ref<B, T>
4983where
4984 B: ByteSlice,
4985 T: FromBytes + Debug,
4986{
4987 #[inline]
4988 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4989 let inner: &T = self;
        fmt.debug_tuple("Ref").field(&inner).finish()
4991 }
4992}
4993
4994impl<T, B> Debug for Ref<B, [T]>
4995where
4996 B: ByteSlice,
4997 T: FromBytes + Debug,
4998{
4999 #[inline]
5000 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
5001 let inner: &[T] = self;
        fmt.debug_tuple("Ref").field(&inner).finish()
5003 }
5004}
5005
5006impl<T, B> Eq for Ref<B, T>
5007where
5008 B: ByteSlice,
5009 T: FromBytes + Eq,
5010{
5011}
5012
5013impl<T, B> Eq for Ref<B, [T]>
5014where
5015 B: ByteSlice,
5016 T: FromBytes + Eq,
5017{
5018}
5019
5020impl<T, B> PartialEq for Ref<B, T>
5021where
5022 B: ByteSlice,
5023 T: FromBytes + PartialEq,
5024{
5025 #[inline]
5026 fn eq(&self, other: &Self) -> bool {
5027 self.deref().eq(other.deref())
5028 }
5029}
5030
5031impl<T, B> PartialEq for Ref<B, [T]>
5032where
5033 B: ByteSlice,
5034 T: FromBytes + PartialEq,
5035{
5036 #[inline]
5037 fn eq(&self, other: &Self) -> bool {
5038 self.deref().eq(other.deref())
5039 }
5040}
5041
5042impl<T, B> Ord for Ref<B, T>
5043where
5044 B: ByteSlice,
5045 T: FromBytes + Ord,
5046{
5047 #[inline]
5048 fn cmp(&self, other: &Self) -> Ordering {
5049 let inner: &T = self;
5050 let other_inner: &T = other;
5051 inner.cmp(other_inner)
5052 }
5053}
5054
5055impl<T, B> Ord for Ref<B, [T]>
5056where
5057 B: ByteSlice,
5058 T: FromBytes + Ord,
5059{
5060 #[inline]
5061 fn cmp(&self, other: &Self) -> Ordering {
5062 let inner: &[T] = self;
5063 let other_inner: &[T] = other;
5064 inner.cmp(other_inner)
5065 }
5066}
5067
5068impl<T, B> PartialOrd for Ref<B, T>
5069where
5070 B: ByteSlice,
5071 T: FromBytes + PartialOrd,
5072{
5073 #[inline]
5074 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5075 let inner: &T = self;
5076 let other_inner: &T = other;
5077 inner.partial_cmp(other_inner)
5078 }
5079}
5080
5081impl<T, B> PartialOrd for Ref<B, [T]>
5082where
5083 B: ByteSlice,
5084 T: FromBytes + PartialOrd,
5085{
5086 #[inline]
5087 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5088 let inner: &[T] = self;
5089 let other_inner: &[T] = other;
5090 inner.partial_cmp(other_inner)
5091 }
5092}
5093
5094mod sealed {
5095 pub trait ByteSliceSealed {}
5096}
5097
5098// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
5099// Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
// references such as that a given reference will never change its length
5101// between calls to deref() or deref_mut(), and that split_at() works as
5102// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
5103// implement them in a way that violated these behaviors, and would break our
5104// unsafe code. Thus, we seal them and implement it only for known-good
5105// reference types. For the same reason, they're unsafe traits.
5106
5107#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5108/// A mutable or immutable reference to a byte slice.
5109///
5110/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
5111/// implemented for various special reference types such as `Ref<[u8]>` and
5112/// `RefMut<[u8]>`.
5113///
5114/// Note that, while it would be technically possible, `ByteSlice` is not
5115/// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`]
5116/// method would involve reallocation, and `split_at` must be a very cheap
5117/// operation in order for the utilities in this crate to perform as designed.
5118///
5119/// [`split_at`]: crate::ByteSlice::split_at
5120// It may seem overkill to go to this length to ensure that this doc link never
5121// breaks. We do this because it simplifies CI - it means that generating docs
5122// always succeeds, so we don't need special logic to only generate docs under
5123// certain features.
5124#[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
5125#[cfg_attr(
5126 not(feature = "alloc"),
5127 doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
5128)]
5129pub unsafe trait ByteSlice:
5130 Deref<Target = [u8]> + Sized + self::sealed::ByteSliceSealed
5131{
5132 /// Are the [`Ref::into_ref`] and [`Ref::into_mut`] methods sound when used
5133 /// with `Self`? If not, evaluating this constant must panic at compile
5134 /// time.
5135 ///
5136 /// This exists to work around #716 on versions of zerocopy prior to 0.8.
5137 ///
5138 /// # Safety
5139 ///
5140 /// This may only be set to true if the following holds: Given the
5141 /// following:
5142 /// - `Self: 'a`
5143 /// - `bytes: Self`
5144 /// - `let ptr = bytes.as_ptr()`
5145 ///
5146 /// ...then:
5147 /// - Using `ptr` to read the memory previously addressed by `bytes` is
5148 /// sound for `'a` even after `bytes` has been dropped.
5149 /// - If `Self: ByteSliceMut`, using `ptr` to write the memory previously
5150 /// addressed by `bytes` is sound for `'a` even after `bytes` has been
5151 /// dropped.
5152 #[doc(hidden)]
5153 const INTO_REF_INTO_MUT_ARE_SOUND: bool;
5154
5155 /// Gets a raw pointer to the first byte in the slice.
5156 #[inline]
5157 fn as_ptr(&self) -> *const u8 {
5158 <[u8]>::as_ptr(self)
5159 }
5160
5161 /// Splits the slice at the midpoint.
5162 ///
5163 /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`.
5164 ///
5165 /// # Panics
5166 ///
5167 /// `x.split_at(mid)` panics if `mid > x.len()`.
5168 fn split_at(self, mid: usize) -> (Self, Self);
5169}
5170
5171#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5172/// A mutable reference to a byte slice.
5173///
5174/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
5175/// a byte slice, and is implemented for various special reference types such as
5176/// `RefMut<[u8]>`.
5177pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
5178 /// Gets a mutable raw pointer to the first byte in the slice.
5179 #[inline]
5180 fn as_mut_ptr(&mut self) -> *mut u8 {
5181 <[u8]>::as_mut_ptr(self)
5182 }
5183}
5184
5185impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
5186// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5187#[allow(clippy::undocumented_unsafe_blocks)]
5188unsafe impl<'a> ByteSlice for &'a [u8] {
5189 // SAFETY: If `&'b [u8]: 'a`, then the underlying memory is treated as
5190 // borrowed immutably for `'a` even if the slice itself is dropped.
5191 const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
5192
5193 #[inline]
5194 fn split_at(self, mid: usize) -> (Self, Self) {
5195 <[u8]>::split_at(self, mid)
5196 }
5197}
5198
5199impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
5200// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5201#[allow(clippy::undocumented_unsafe_blocks)]
5202unsafe impl<'a> ByteSlice for &'a mut [u8] {
5203 // SAFETY: If `&'b mut [u8]: 'a`, then the underlying memory is treated as
5204 // borrowed mutably for `'a` even if the slice itself is dropped.
5205 const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
5206
5207 #[inline]
5208 fn split_at(self, mid: usize) -> (Self, Self) {
5209 <[u8]>::split_at_mut(self, mid)
5210 }
5211}
5212
5213impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
5214// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5215#[allow(clippy::undocumented_unsafe_blocks)]
5216unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
5217 const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
5218 panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716")
5219 } else {
5220 // When compiling documentation, allow the evaluation of this constant
5221 // to succeed. This doesn't represent a soundness hole - it just delays
5222 // any error to runtime. The reason we need this is that, otherwise,
5223 // `rustdoc` will fail when trying to document this item.
5224 false
5225 };
5226
5227 #[inline]
5228 fn split_at(self, mid: usize) -> (Self, Self) {
        cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
5230 }
5231}
5232
5233impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
5234// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5235#[allow(clippy::undocumented_unsafe_blocks)]
5236unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
5237 const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
5238 panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716")
5239 } else {
5240 // When compiling documentation, allow the evaluation of this constant
5241 // to succeed. This doesn't represent a soundness hole - it just delays
5242 // any error to runtime. The reason we need this is that, otherwise,
5243 // `rustdoc` will fail when trying to document this item.
5244 false
5245 };
5246
5247 #[inline]
5248 fn split_at(self, mid: usize) -> (Self, Self) {
        RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
5250 }
5251}
5252
5253// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5254#[allow(clippy::undocumented_unsafe_blocks)]
5255unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
5256
5257// TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5258#[allow(clippy::undocumented_unsafe_blocks)]
5259unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
5260
5261#[cfg(feature = "alloc")]
5262#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5263mod alloc_support {
5264 use alloc::vec::Vec;
5265
5266 use super::*;
5267
5268 /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5269 /// vector. The new items are initialized with zeroes.
5270 ///
5271 /// # Panics
5272 ///
5273 /// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
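    ///
    /// # Examples
    ///
    /// A short usage sketch (requires the `alloc` feature):
    ///
    /// ```rust
    /// use zerocopy::extend_vec_zeroed;
    ///
    /// let mut v = vec![1u8, 2];
    /// extend_vec_zeroed(&mut v, 3);
    /// assert_eq!(&*v, &[1, 2, 0, 0, 0]);
    /// ```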
5274 #[inline(always)]
5275 pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
5276 insert_vec_zeroed(v, v.len(), additional);
5277 }
5278
5279 /// Inserts `additional` new items into `Vec<T>` at `position`.
5280 /// The new items are initialized with zeroes.
5281 ///
5282 /// # Panics
5283 ///
5284 /// * Panics if `position > v.len()`.
5285 /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
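    ///
    /// # Examples
    ///
    /// A short usage sketch (requires the `alloc` feature):
    ///
    /// ```rust
    /// use zerocopy::insert_vec_zeroed;
    ///
    /// let mut v = vec![1u8, 2];
    /// insert_vec_zeroed(&mut v, 1, 2);
    /// assert_eq!(&*v, &[1, 0, 0, 2]);
    /// ```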
5286 #[inline]
5287 pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
5288 assert!(position <= v.len());
5289 v.reserve(additional);
5290 // SAFETY: The `reserve` call guarantees that these cannot overflow:
5291 // * `ptr.add(position)`
5292 // * `position + additional`
5293 // * `v.len() + additional`
5294 //
5295 // `v.len() - position` cannot overflow because we asserted that
5296 // `position <= v.len()`.
5297 unsafe {
5298 // This is a potentially overlapping copy.
5299 let ptr = v.as_mut_ptr();
5300 #[allow(clippy::arithmetic_side_effects)]
5301 ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
5302 ptr.add(position).write_bytes(0, additional);
5303 #[allow(clippy::arithmetic_side_effects)]
5304 v.set_len(v.len() + additional);
5305 }
5306 }
5307
5308 #[cfg(test)]
5309 mod tests {
5310 use core::convert::TryFrom as _;
5311
5312 use super::*;
5313
5314 #[test]
5315 fn test_extend_vec_zeroed() {
5316 // Test extending when there is an existing allocation.
5317 let mut v = vec![100u64, 200, 300];
5318 extend_vec_zeroed(&mut v, 3);
5319 assert_eq!(v.len(), 6);
5320 assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
5321 drop(v);
5322
5323 // Test extending when there is no existing allocation.
5324 let mut v: Vec<u64> = Vec::new();
5325 extend_vec_zeroed(&mut v, 3);
5326 assert_eq!(v.len(), 3);
5327 assert_eq!(&*v, &[0, 0, 0]);
5328 drop(v);
5329 }
5330
5331 #[test]
5332 fn test_extend_vec_zeroed_zst() {
5333 // Test extending when there is an existing (fake) allocation.
5334 let mut v = vec![(), (), ()];
5335 extend_vec_zeroed(&mut v, 3);
5336 assert_eq!(v.len(), 6);
5337 assert_eq!(&*v, &[(), (), (), (), (), ()]);
5338 drop(v);
5339
5340 // Test extending when there is no existing (fake) allocation.
5341 let mut v: Vec<()> = Vec::new();
5342 extend_vec_zeroed(&mut v, 3);
5343 assert_eq!(&*v, &[(), (), ()]);
5344 drop(v);
5345 }
5346
5347 #[test]
5348 fn test_insert_vec_zeroed() {
5349 // Insert at start (no existing allocation).
5350 let mut v: Vec<u64> = Vec::new();
5351 insert_vec_zeroed(&mut v, 0, 2);
5352 assert_eq!(v.len(), 2);
5353 assert_eq!(&*v, &[0, 0]);
5354 drop(v);
5355
5356 // Insert at start.
5357 let mut v = vec![100u64, 200, 300];
5358 insert_vec_zeroed(&mut v, 0, 2);
5359 assert_eq!(v.len(), 5);
5360 assert_eq!(&*v, &[0, 0, 100, 200, 300]);
5361 drop(v);
5362
5363 // Insert at middle.
5364 let mut v = vec![100u64, 200, 300];
5365 insert_vec_zeroed(&mut v, 1, 1);
5366 assert_eq!(v.len(), 4);
5367 assert_eq!(&*v, &[100, 0, 200, 300]);
5368 drop(v);
5369
5370 // Insert at end.
5371 let mut v = vec![100u64, 200, 300];
5372 insert_vec_zeroed(&mut v, 3, 1);
5373 assert_eq!(v.len(), 4);
5374 assert_eq!(&*v, &[100, 200, 300, 0]);
5375 drop(v);
5376 }
5377
5378 #[test]
5379 fn test_insert_vec_zeroed_zst() {
5380 // Insert at start (no existing fake allocation).
5381 let mut v: Vec<()> = Vec::new();
5382 insert_vec_zeroed(&mut v, 0, 2);
5383 assert_eq!(v.len(), 2);
5384 assert_eq!(&*v, &[(), ()]);
5385 drop(v);
5386
5387 // Insert at start.
5388 let mut v = vec![(), (), ()];
5389 insert_vec_zeroed(&mut v, 0, 2);
5390 assert_eq!(v.len(), 5);
5391 assert_eq!(&*v, &[(), (), (), (), ()]);
5392 drop(v);
5393
5394 // Insert at middle.
5395 let mut v = vec![(), (), ()];
5396 insert_vec_zeroed(&mut v, 1, 1);
5397 assert_eq!(v.len(), 4);
5398 assert_eq!(&*v, &[(), (), (), ()]);
5399 drop(v);
5400
5401 // Insert at end.
5402 let mut v = vec![(), (), ()];
5403 insert_vec_zeroed(&mut v, 3, 1);
5404 assert_eq!(v.len(), 4);
5405 assert_eq!(&*v, &[(), (), (), ()]);
5406 drop(v);
5407 }
5408
5409 #[test]
5410 fn test_new_box_zeroed() {
5411 assert_eq!(*u64::new_box_zeroed(), 0);
5412 }
5413
5414 #[test]
5415 fn test_new_box_zeroed_array() {
5416 drop(<[u32; 0x1000]>::new_box_zeroed());
5417 }
5418
5419 #[test]
5420 fn test_new_box_zeroed_zst() {
5421 // This test exists in order to exercise unsafe code, especially
5422 // when running under Miri.
5423 #[allow(clippy::unit_cmp)]
5424 {
5425 assert_eq!(*<()>::new_box_zeroed(), ());
5426 }
5427 }
5428
5429 #[test]
5430 fn test_new_box_slice_zeroed() {
5431 let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
5432 assert_eq!(s.len(), 3);
5433 assert_eq!(&*s, &[0, 0, 0]);
5434 s[1] = 3;
5435 assert_eq!(&*s, &[0, 3, 0]);
5436 }
5437
5438 #[test]
5439 fn test_new_box_slice_zeroed_empty() {
5440 let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
5441 assert_eq!(s.len(), 0);
5442 }
5443
5444 #[test]
5445 fn test_new_box_slice_zeroed_zst() {
5446 let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
5447 assert_eq!(s.len(), 3);
5448 assert!(s.get(10).is_none());
5449 // This test exists in order to exercise unsafe code, especially
5450 // when running under Miri.
5451 #[allow(clippy::unit_cmp)]
5452 {
5453 assert_eq!(s[1], ());
5454 }
5455 s[2] = ();
5456 }
5457
5458 #[test]
5459 fn test_new_box_slice_zeroed_zst_empty() {
5460 let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
5461 assert_eq!(s.len(), 0);
5462 }
5463
5464 #[test]
5465 #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
5466 fn test_new_box_slice_zeroed_panics_mul_overflow() {
5467 let _ = u16::new_box_slice_zeroed(usize::MAX);
5468 }
5469
5470 #[test]
5471 #[should_panic(expected = "assertion failed: size <= max_alloc")]
5472 fn test_new_box_slice_zeroed_panics_isize_overflow() {
5473 let max = usize::try_from(isize::MAX).unwrap();
5474 let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
5475 }
5476 }
5477}
5478
5479#[cfg(feature = "alloc")]
5480#[doc(inline)]
5481pub use alloc_support::*;
5482
5483#[cfg(test)]
5484mod tests {
5485 #![allow(clippy::unreadable_literal)]
5486
5487 use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref};
5488
5489 use static_assertions::assert_impl_all;
5490
5491 use super::*;
5492 use crate::util::testutil::*;
5493
5494 // An unsized type.
5495 //
5496 // This is used to test the custom derives of our traits. The `[u8]` type
5497 // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5498 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
5499 #[repr(transparent)]
5500 struct Unsized([u8]);
5501
5502 impl Unsized {
5503 fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5504 // SAFETY: This *probably* sound - since the layouts of `[u8]` and
5505 // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5506 // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5507 // guaranteed by the language spec, we can just change this since
5508 // it's in test code.
5509 //
5510 // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5511 unsafe { mem::transmute(slc) }
5512 }
5513 }
5514
5515 /// Tests of when a sized `DstLayout` is extended with a sized field.
5516 #[allow(clippy::decimal_literal_representation)]
5517 #[test]
5518 fn test_dst_layout_extend_sized_with_sized() {
5519 // This macro constructs a layout corresponding to a `u8` and extends it
5520 // with a zero-sized trailing field of given alignment `n`. The macro
5521 // tests that the resulting layout has both size and alignment `min(n,
5522 // P)` for all valid values of `repr(packed(P))`.
5523 macro_rules! test_align_is_size {
5524 ($n:expr) => {
5525 let base = DstLayout::for_type::<u8>();
5526 let trailing_field = DstLayout::for_type::<elain::Align<$n>>();
5527
5528 let packs =
5529 core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p))));
5530
5531 for pack in packs {
5532 let composite = base.extend(trailing_field, pack);
5533 let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN);
5534 let align = $n.min(max_align.get());
5535 assert_eq!(
5536 composite,
5537 DstLayout {
5538 align: NonZeroUsize::new(align).unwrap(),
5539 size_info: SizeInfo::Sized { _size: align }
5540 }
5541 )
5542 }
5543 };
5544 }
5545
5546 test_align_is_size!(1);
5547 test_align_is_size!(2);
5548 test_align_is_size!(4);
5549 test_align_is_size!(8);
5550 test_align_is_size!(16);
5551 test_align_is_size!(32);
5552 test_align_is_size!(64);
5553 test_align_is_size!(128);
5554 test_align_is_size!(256);
5555 test_align_is_size!(512);
5556 test_align_is_size!(1024);
5557 test_align_is_size!(2048);
5558 test_align_is_size!(4096);
5559 test_align_is_size!(8192);
5560 test_align_is_size!(16384);
5561 test_align_is_size!(32768);
5562 test_align_is_size!(65536);
5563 test_align_is_size!(131072);
5564 test_align_is_size!(262144);
5565 test_align_is_size!(524288);
5566 test_align_is_size!(1048576);
5567 test_align_is_size!(2097152);
5568 test_align_is_size!(4194304);
5569 test_align_is_size!(8388608);
5570 test_align_is_size!(16777216);
5571 test_align_is_size!(33554432);
5572 test_align_is_size!(67108864);
5574 test_align_is_size!(134217728);
5575 test_align_is_size!(268435456);
5576 }
5577
5578 /// Tests of when a sized `DstLayout` is extended with a DST field.
5579 #[test]
5580 fn test_dst_layout_extend_sized_with_dst() {
        // Test that, for all combinations of real-world alignments and
        // `repr_packed` values, the extension of a sized `DstLayout` with
5583 // a DST field correctly computes the trailing offset in the composite
5584 // layout.
5585
5586 let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap());
5587 let packs = core::iter::once(None).chain(aligns.clone().map(Some));
5588
5589 for align in aligns {
5590 for pack in packs.clone() {
5591 let base = DstLayout::for_type::<u8>();
5592 let elem_size = 42;
5593 let trailing_field_offset = 11;
5594
5595 let trailing_field = DstLayout {
5596 align,
5597 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5598 _elem_size: elem_size,
                        _offset: trailing_field_offset,
5600 }),
5601 };
5602
5603 let composite = base.extend(trailing_field, pack);
5604
5605 let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get();
5606
5607 let align = align.get().min(max_align);
5608
5609 assert_eq!(
5610 composite,
5611 DstLayout {
5612 align: NonZeroUsize::new(align).unwrap(),
5613 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5614 _elem_size: elem_size,
5615 _offset: align + trailing_field_offset,
5616 }),
5617 }
5618 )
5619 }
5620 }
5621 }
5622
5623 /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the
5624 /// expected amount of trailing padding.
5625 #[test]
5626 fn test_dst_layout_pad_to_align_with_sized() {
5627 // For all valid alignments `align`, construct a one-byte layout aligned
5628 // to `align`, call `pad_to_align`, and assert that the size of the
5629 // resulting layout is equal to `align`.
5630 for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
5631 let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } };
5632
5633 assert_eq!(
5634 layout.pad_to_align(),
5635 DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } }
5636 );
5637 }
5638
5639 // Test explicitly-provided combinations of unpadded and padded
5640 // counterparts.
5641
5642 macro_rules! test {
5643 (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr }
5644 => padded { size: $padded_size:expr, align: $padded_align:expr }) => {
5645 let unpadded = DstLayout {
5646 align: NonZeroUsize::new($unpadded_align).unwrap(),
5647 size_info: SizeInfo::Sized { _size: $unpadded_size },
5648 };
5649 let padded = unpadded.pad_to_align();
5650
5651 assert_eq!(
5652 padded,
5653 DstLayout {
5654 align: NonZeroUsize::new($padded_align).unwrap(),
5655 size_info: SizeInfo::Sized { _size: $padded_size },
5656 }
5657 );
5658 };
5659 }
5660
5661 test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 });
5662 test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 });
5663 test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 });
5664 test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 });
5665 test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 });
5666 test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 });
5667 test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 });
5668 test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 });
5669 test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 });
5670
5671 let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get();
5672
5673 test!(unpadded { size: 1, align: current_max_align }
5674 => padded { size: current_max_align, align: current_max_align });
5675
5676 test!(unpadded { size: current_max_align + 1, align: current_max_align }
5677 => padded { size: current_max_align * 2, align: current_max_align });
5678 }
5679
5680 /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op.
5681 #[test]
5682 fn test_dst_layout_pad_to_align_with_dst() {
5683 for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
5684 for offset in 0..10 {
5685 for elem_size in 0..10 {
5686 let layout = DstLayout {
5687 align,
5688 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
5689 _offset: offset,
5690 _elem_size: elem_size,
5691 }),
5692 };
5693 assert_eq!(layout.pad_to_align(), layout);
5694 }
5695 }
5696 }
5697 }
5698
5699 // This test takes a long time when running under Miri, so we skip it in
5700 // that case. This is acceptable because this is a logic test that doesn't
5701 // attempt to expose UB.
5702 #[test]
5703 #[cfg_attr(miri, ignore)]
    fn test_validate_cast_and_convert_metadata() {
5705 impl From<usize> for SizeInfo {
5706 fn from(_size: usize) -> SizeInfo {
5707 SizeInfo::Sized { _size }
5708 }
5709 }
5710
5711 impl From<(usize, usize)> for SizeInfo {
5712 fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo {
5713 SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
5714 }
5715 }
5716
5717 fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout {
5718 DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() }
5719 }
5720
5721 /// This macro accepts arguments in the form of:
5722 ///
        ///           layout(_, _, _).validate(_, _, _), Ok(Some((_, _)))
        ///                  |  |  |           |  |  |            |  |
        ///     base_size ---+  |  |           |  |  |            |  |
        ///     align ----------+  |           |  |  |            |  |
        ///     trailing_size -----+           |  |  |            |  |
        ///     addr --------------------------+  |  |            |  |
        ///     bytes_len ------------------------+  |            |  |
        ///     cast_type ---------------------------+            |  |
        ///     elems --------------------------------------------+  |
        ///     split_at --------------------------------------------+
5733 ///
5734 /// `.validate` is shorthand for `.validate_cast_and_convert_metadata`
5735 /// for brevity.
5736 ///
5737 /// Each argument can either be an iterator or a wildcard. Each
5738 /// wildcarded variable is implicitly replaced by an iterator over a
5739 /// representative sample of values for that variable. Each `test!`
5740 /// invocation iterates over every combination of values provided by
5741 /// each variable's iterator (ie, the cartesian product) and validates
5742 /// that the results are expected.
5743 ///
5744 /// The final argument uses the same syntax, but it has a different
5745 /// meaning:
5746 /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to
5747 /// `assert_matches!` to validate the computed result for each
5748 /// combination of input values.
5749 /// - If it is `Err(msg)`, then `test!` validates that the call to
5750 /// `validate_cast_and_convert_metadata` panics with the given panic
5751 /// message.
5752 ///
        /// Note that the meta-variables matching these arguments are `tt`
        /// fragments, and some valid expressions are not valid `tt`s (such as
        /// `a..b`). In this case, wrap the expression in parentheses, and it
        /// will become a valid `tt`.
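        ///
        /// For example, the invocation below (which appears in the test
        /// body):
        ///
        ///     test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));
        ///
        /// wildcards `size` and `bytes_len`, fixes `align` at 2 and
        /// `cast_type` at `_Prefix`, iterates `addr` over `ODDS`, and asserts
        /// that every combination matches `Ok(None)`.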
5757 macro_rules! test {
5758 ($(:$sizes:expr =>)?
5759 layout($size:tt, $align:tt)
5760 .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
5761 ) => {
5762 itertools::iproduct!(
5763 test!(@generate_size $size),
5764 test!(@generate_align $align),
5765 test!(@generate_usize $addr),
5766 test!(@generate_usize $bytes_len),
5767 test!(@generate_cast_type $cast_type)
5768 ).for_each(|(size_info, align, addr, bytes_len, cast_type)| {
5769 // Temporarily disable the panic hook installed by the test
5770 // harness. If we don't do this, all panic messages will be
5771 // kept in an internal log. On its own, this isn't a
                // problem, but if an uncaught panic ever happens (i.e., in
                // code later in this test, not in this macro), all of the
5774 // previously-buffered messages will be dumped, hiding the
5775 // real culprit.
5776 let previous_hook = std::panic::take_hook();
5777 // I don't understand why, but this seems to be required in
5778 // addition to the previous line.
5779 std::panic::set_hook(Box::new(|_| {}));
5780 let actual = std::panic::catch_unwind(|| {
5781 layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
5782 }).map_err(|d| {
5783 *d.downcast::<&'static str>().expect("expected string panic message").as_ref()
5784 });
5785 std::panic::set_hook(previous_hook);
5786
5787 assert_matches::assert_matches!(
5788 actual, $expect,
5789 "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
5790 );
5791 });
5792 };
5793 (@generate_usize _) => { 0..8 };
5794 // Generate sizes for both Sized and !Sized types.
5795 (@generate_size _) => {
5796 test!(@generate_size (_)).chain(test!(@generate_size (_, _)))
5797 };
5798 // Generate sizes for both Sized and !Sized types by chaining
5799 // specified iterators for each.
5800 (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => {
5801 test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes))
5802 };
5803 // Generate sizes for Sized types.
5804 (@generate_size (_)) => { test!(@generate_size (0..8)) };
5805 (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) };
5806 // Generate sizes for !Sized types.
5807 (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => {
5808 itertools::iproduct!(
5809 test!(@generate_min_size $min_sizes),
5810 test!(@generate_elem_size $elem_sizes)
5811 ).map(Into::<SizeInfo>::into)
5812 };
5813 (@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) };
5814 (@generate_min_size _) => { 0..8 };
5815 (@generate_elem_size _) => { 1..8 };
5816 (@generate_align _) => { [1, 2, 4, 8, 16] };
            (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some)) };
5818 (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
5819 (@generate_cast_type $variant:ident) => { [_CastType::$variant] };
5820 // Some expressions need to be wrapped in parentheses in order to be
5821 // valid `tt`s (required by the top match pattern). See the comment
5822 // below for more details. This arm removes these parentheses to
5823 // avoid generating an `unused_parens` warning.
5824 (@$_:ident ($vals:expr)) => { $vals };
5825 (@$_:ident $vals:expr) => { $vals };
5826 }
5827
5828 const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14];
5829 const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15];
5830
5831 // base_size is too big for the memory region.
5832 test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None));
5833 test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None));
5834
5835 // addr is unaligned for prefix cast
5836 test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));
5838
5839 // addr is aligned, but end of buffer is unaligned for suffix cast
5840 test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None));
5842
5843 // Unfortunately, these constants cannot easily be used in the
5844 // implementation of `validate_cast_and_convert_metadata`, since
5845 // `panic!` consumes a string literal, not an expression.
5846 //
5847 // It's important that these messages be in a separate module. If they
5848 // were at the function's top level, we'd pass them to `test!` as, e.g.,
5849 // `Err(TRAILING)`, which would run into a subtle Rust footgun - the
5850 // `TRAILING` identifier would be treated as a pattern to match rather
5851 // than a value to check for equality.
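        //
        // For example, a typo'd pattern like `Err(TRAILNG)` would silently
        // bind a fresh `TRAILNG` variable and match *every* error instead of
        // failing to compile; with a path pattern like `Err(msgs::TRAILING)`,
        // the same typo is a compile error.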
5852 mod msgs {
5853 pub(super) const TRAILING: &str =
5854 "attempted to cast to slice type with zero-sized element";
5855 pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
5856 }
5857
5858 // casts with ZST trailing element types are unsupported
        test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING));
5860
5861 // addr + bytes_len must not overflow usize
5862 test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW));
5863 test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
5864 test!(
5865 layout(_, _).validate(
5866 [usize::MAX / 2 + 1, usize::MAX],
5867 [usize::MAX / 2 + 1, usize::MAX],
5868 _
5869 ),
5870 Err(msgs::OVERFLOW)
5871 );
5872
5873 // Validates that `validate_cast_and_convert_metadata` satisfies its own
5874 // documented safety postconditions, and also a few other properties
5875 // that aren't documented but we want to guarantee anyway.
5876 fn validate_behavior(
5877 (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
5878 ) {
5879 if let Some((elems, split_at)) =
5880 layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
5881 {
5882 let (size_info, align) = (layout.size_info, layout.align);
5883 let debug_str = format!(
5884 "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
5885 );
5886
5887 // If this is a sized type (no trailing slice), then `elems` is
5888 // meaningless, but in practice we set it to 0. Callers are not
                // allowed to rely on this, but a lot of the math is nicer if
                // they can, and some callers might accidentally rely on it.
5891 let sized = matches!(layout.size_info, SizeInfo::Sized { .. });
5892 assert!(!(sized && elems != 0), "{}", debug_str);
5893
5894 let resulting_size = match layout.size_info {
5895 SizeInfo::Sized { _size } => _size,
5896 SizeInfo::SliceDst(TrailingSliceLayout {
5897 _offset: offset,
5898 _elem_size: elem_size,
5899 }) => {
5900 let padded_size = |elems| {
5901 let without_padding = offset + elems * elem_size;
5902 without_padding
5903 + util::core_layout::padding_needed_for(without_padding, align)
5904 };
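
                        // For example, with `offset = 2`, `elem_size = 4`,
                        // and `align = 8`: `padded_size(1)` is
                        // `6 + padding_needed_for(6, 8) = 6 + 2 = 8`.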
5905
5906 let resulting_size = padded_size(elems);
5907 // Test that `validate_cast_and_convert_metadata`
5908 // computed the largest possible value that fits in the
5909 // given range.
5910 assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str);
5911 resulting_size
5912 }
5913 };
5914
5915 // Test safety postconditions guaranteed by
5916 // `validate_cast_and_convert_metadata`.
5917 assert!(resulting_size <= bytes_len, "{}", debug_str);
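                // For a prefix cast, the object must occupy the bytes
                // `[0, split_at)`; for a suffix cast, it must occupy the
                // bytes `[split_at, bytes_len)`.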
5918 match cast_type {
5919 _CastType::_Prefix => {
5920 assert_eq!(addr % align, 0, "{}", debug_str);
5921 assert_eq!(resulting_size, split_at, "{}", debug_str);
5922 }
5923 _CastType::_Suffix => {
5924 assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
5925 assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
5926 }
5927 }
5928 } else {
5929 let min_size = match layout.size_info {
5930 SizeInfo::Sized { _size } => _size,
5931 SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => {
5932 _offset + util::core_layout::padding_needed_for(_offset, layout.align)
5933 }
5934 };
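
                // For example, a `SliceDst` with `_offset = 5` and
                // `align = 4` has `min_size = 5 + 3 = 8`.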
5935
                // If a cast is invalid, it is either because...
                // 1. there are insufficient bytes at the given region for
                //    the type:
                let insufficient_bytes = bytes_len < min_size;
                // 2. performing the cast would misalign the type:
5940 let base = match cast_type {
5941 _CastType::_Prefix => 0,
5942 _CastType::_Suffix => bytes_len,
5943 };
5944 let misaligned = (base + addr) % layout.align != 0;
5945
5946 assert!(insufficient_bytes || misaligned);
5947 }
5948 }
5949
5950 let sizes = 0..8;
5951 let elem_sizes = 1..8;
5952 let size_infos = sizes
5953 .clone()
5954 .map(Into::<SizeInfo>::into)
5955 .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into));
5956 let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32])
5957 .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0))
5958 .map(|(size_info, align)| layout(size_info, align));
5959 itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
5960 .for_each(validate_behavior);
5961 }
5962
5963 #[test]
5964 #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
5965 fn test_validate_rust_layout() {
5966 use core::ptr::NonNull;
5967
5968 // This test synthesizes pointers with various metadata and uses Rust's
5969 // built-in APIs to confirm that Rust makes decisions about type layout
5970 // which are consistent with what we believe is guaranteed by the
5971 // language. If this test fails, it doesn't just mean our code is wrong
5972 // - it means we're misunderstanding the language's guarantees.
5973
5974 #[derive(Debug)]
5975 struct MacroArgs {
5976 offset: usize,
5977 align: NonZeroUsize,
5978 elem_size: Option<usize>,
5979 }
5980
5981 /// # Safety
5982 ///
5983 /// `test` promises to only call `addr_of_slice_field` on a `NonNull<T>`
5984 /// which points to a valid `T`.
5985 ///
5986 /// `with_elems` must produce a pointer which points to a valid `T`.
5987 fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>(
5988 args: MacroArgs,
5989 with_elems: W,
5990 addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>,
5991 ) {
5992 let dst = args.elem_size.is_some();
5993 let layout = {
5994 let size_info = match args.elem_size {
5995 Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
5996 _offset: args.offset,
5997 _elem_size: elem_size,
5998 }),
5999 None => SizeInfo::Sized {
6000 // Rust only supports types whose sizes are a multiple
6001 // of their alignment. If the macro created a type like
6002 // this:
6003 //
6004 // #[repr(C, align(2))]
6005 // struct Foo([u8; 1]);
6006 //
6007 // ...then Rust will automatically round the type's size
6008 // up to 2.
6009 _size: args.offset
6010 + util::core_layout::padding_needed_for(args.offset, args.align),
6011 },
6012 };
6013 DstLayout { size_info, align: args.align }
6014 };
6015
6016 for elems in 0..128 {
6017 let ptr = with_elems(elems);
6018
6019 if let Some(addr_of_slice_field) = addr_of_slice_field {
6020 let slc_field_ptr = addr_of_slice_field(ptr).as_ptr();
6021 // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to
6022 // the same valid Rust object.
6023 let offset: usize =
6024 unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() };
6025 assert_eq!(offset, args.offset);
6026 }
6027
6028 // SAFETY: `ptr` points to a valid `T`.
6029 let (size, align) = unsafe {
6030 (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr()))
6031 };
6032
6033 // Avoid expensive allocation when running under Miri.
6034 let assert_msg = if !cfg!(miri) {
6035 format!("\n{args:?}\nsize:{size}, align:{align}")
6036 } else {
6037 String::new()
6038 };
6039
6040 let without_padding =
6041 args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0);
6042 assert!(size >= without_padding, "{}", assert_msg);
6043 assert_eq!(align, args.align.get(), "{}", assert_msg);
6044
6045 // This encodes the most important part of the test: our
6046 // understanding of how Rust determines the layout of repr(C)
6047 // types. Sized repr(C) types are trivial, but DST types have
6048 // some subtlety. Note that:
6049 // - For sized types, `without_padding` is just the size of the
6050 // type that we constructed for `Foo`. Since we may have
6051 // requested a larger alignment, `Foo` may actually be larger
6052 // than this, hence `padding_needed_for`.
6053 // - For unsized types, `without_padding` is dynamically
6054 // computed from the offset, the element size, and element
6055 // count. We expect that the size of the object should be
6056 // `offset + elem_size * elems` rounded up to the next
6057 // alignment.
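                //
                // For example, with `offset = 1`, `elem_size = 2`,
                // `elems = 3`, and `align = 4`: `without_padding = 7`, which
                // rounds up to an expected size of 8.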
6058 let expected_size = without_padding
6059 + util::core_layout::padding_needed_for(without_padding, args.align);
6060 assert_eq!(expected_size, size, "{}", assert_msg);
6061
6062 // For zero-sized element types,
6063 // `validate_cast_and_convert_metadata` just panics, so we skip
6064 // testing those types.
6065 if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) {
6066 let addr = ptr.addr().get();
6067 let (got_elems, got_split_at) = layout
6068 .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix)
6069 .unwrap();
6070 // Avoid expensive allocation when running under Miri.
6071 let assert_msg = if !cfg!(miri) {
6072 format!(
6073 "{}\nvalidate_cast_and_convert_metadata({addr}, {size})",
6074 assert_msg
6075 )
6076 } else {
6077 String::new()
6078 };
6079 assert_eq!(got_split_at, size, "{}", assert_msg);
6080 if dst {
6081 assert!(got_elems >= elems, "{}", assert_msg);
6082 if got_elems != elems {
6083 // If `validate_cast_and_convert_metadata`
6084 // returned more elements than `elems`, that
6085 // means that `elems` is not the maximum number
6086 // of elements that can fit in `size` - in other
6087 // words, there is enough padding at the end of
6088 // the value to fit at least one more element.
6089 // If we use this metadata to synthesize a
6090 // pointer, despite having a different element
6091 // count, we still expect it to have the same
6092 // size.
6093 let got_ptr = with_elems(got_elems);
6094 // SAFETY: `got_ptr` is a pointer to a valid `T`.
6095 let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) };
6096 assert_eq!(size_of_got_ptr, size, "{}", assert_msg);
6097 }
6098 } else {
6099 // For sized casts, the returned element value is
6100 // technically meaningless, and we don't guarantee any
6101 // particular value. In practice, it's always zero.
6102 assert_eq!(got_elems, 0, "{}", assert_msg)
6103 }
6104 }
6105 }
6106 }
6107
6108 macro_rules! validate_against_rust {
6109 ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{
6110 #[repr(C, align($align))]
6111 struct Foo([u8; $offset]$(, [[u8; $elem_size]])?);
6112
6113 let args = MacroArgs {
6114 offset: $offset,
6115 align: $align.try_into().unwrap(),
6116 elem_size: {
6117 #[allow(unused)]
6118 let ret = None::<usize>;
6119 $(let ret = Some($elem_size);)?
6120 ret
6121 }
6122 };
6123
6124 #[repr(C, align($align))]
6125 struct FooAlign;
6126 // Create an aligned buffer to use in order to synthesize
6127 // pointers to `Foo`. We don't ever load values from these
6128 // pointers - we just do arithmetic on them - so having a "real"
6129 // block of memory as opposed to a validly-aligned-but-dangling
6130 // pointer is only necessary to make Miri happy since we run it
6131 // with "strict provenance" checking enabled.
6132 let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]);
6133 let with_elems = |elems| {
6134 let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems);
6135 #[allow(clippy::as_conversions)]
6136 NonNull::new(slc.as_ptr() as *mut Foo).unwrap()
6137 };
6138 let addr_of_slice_field = {
6139 #[allow(unused)]
6140 let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>;
6141 $(
6142 // SAFETY: `test` promises to only call `f` with a `ptr`
6143 // to a valid `Foo`.
6144 let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe {
6145 NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>()
6146 });
6147 let _ = $elem_size;
6148 )?
6149 f
6150 };
6151
6152 test::<Foo, _>(args, with_elems, addr_of_slice_field);
6153 }};
6154 }
6155
        // Every permutation of:
        //   - offset in 0..=4
        //   - align in {1, 2, 4, 8, 16}
        //   - elem_size in 0..=4 (plus no elem_size)
6160 validate_against_rust!(0, 1);
6161 validate_against_rust!(0, 1, 0);
6162 validate_against_rust!(0, 1, 1);
6163 validate_against_rust!(0, 1, 2);
6164 validate_against_rust!(0, 1, 3);
6165 validate_against_rust!(0, 1, 4);
6166 validate_against_rust!(0, 2);
6167 validate_against_rust!(0, 2, 0);
6168 validate_against_rust!(0, 2, 1);
6169 validate_against_rust!(0, 2, 2);
6170 validate_against_rust!(0, 2, 3);
6171 validate_against_rust!(0, 2, 4);
6172 validate_against_rust!(0, 4);
6173 validate_against_rust!(0, 4, 0);
6174 validate_against_rust!(0, 4, 1);
6175 validate_against_rust!(0, 4, 2);
6176 validate_against_rust!(0, 4, 3);
6177 validate_against_rust!(0, 4, 4);
6178 validate_against_rust!(0, 8);
6179 validate_against_rust!(0, 8, 0);
6180 validate_against_rust!(0, 8, 1);
6181 validate_against_rust!(0, 8, 2);
6182 validate_against_rust!(0, 8, 3);
6183 validate_against_rust!(0, 8, 4);
6184 validate_against_rust!(0, 16);
6185 validate_against_rust!(0, 16, 0);
6186 validate_against_rust!(0, 16, 1);
6187 validate_against_rust!(0, 16, 2);
6188 validate_against_rust!(0, 16, 3);
6189 validate_against_rust!(0, 16, 4);
6190 validate_against_rust!(1, 1);
6191 validate_against_rust!(1, 1, 0);
6192 validate_against_rust!(1, 1, 1);
6193 validate_against_rust!(1, 1, 2);
6194 validate_against_rust!(1, 1, 3);
6195 validate_against_rust!(1, 1, 4);
6196 validate_against_rust!(1, 2);
6197 validate_against_rust!(1, 2, 0);
6198 validate_against_rust!(1, 2, 1);
6199 validate_against_rust!(1, 2, 2);
6200 validate_against_rust!(1, 2, 3);
6201 validate_against_rust!(1, 2, 4);
6202 validate_against_rust!(1, 4);
6203 validate_against_rust!(1, 4, 0);
6204 validate_against_rust!(1, 4, 1);
6205 validate_against_rust!(1, 4, 2);
6206 validate_against_rust!(1, 4, 3);
6207 validate_against_rust!(1, 4, 4);
6208 validate_against_rust!(1, 8);
6209 validate_against_rust!(1, 8, 0);
6210 validate_against_rust!(1, 8, 1);
6211 validate_against_rust!(1, 8, 2);
6212 validate_against_rust!(1, 8, 3);
6213 validate_against_rust!(1, 8, 4);
6214 validate_against_rust!(1, 16);
6215 validate_against_rust!(1, 16, 0);
6216 validate_against_rust!(1, 16, 1);
6217 validate_against_rust!(1, 16, 2);
6218 validate_against_rust!(1, 16, 3);
6219 validate_against_rust!(1, 16, 4);
6220 validate_against_rust!(2, 1);
6221 validate_against_rust!(2, 1, 0);
6222 validate_against_rust!(2, 1, 1);
6223 validate_against_rust!(2, 1, 2);
6224 validate_against_rust!(2, 1, 3);
6225 validate_against_rust!(2, 1, 4);
6226 validate_against_rust!(2, 2);
6227 validate_against_rust!(2, 2, 0);
6228 validate_against_rust!(2, 2, 1);
6229 validate_against_rust!(2, 2, 2);
6230 validate_against_rust!(2, 2, 3);
6231 validate_against_rust!(2, 2, 4);
6232 validate_against_rust!(2, 4);
6233 validate_against_rust!(2, 4, 0);
6234 validate_against_rust!(2, 4, 1);
6235 validate_against_rust!(2, 4, 2);
6236 validate_against_rust!(2, 4, 3);
6237 validate_against_rust!(2, 4, 4);
6238 validate_against_rust!(2, 8);
6239 validate_against_rust!(2, 8, 0);
6240 validate_against_rust!(2, 8, 1);
6241 validate_against_rust!(2, 8, 2);
6242 validate_against_rust!(2, 8, 3);
6243 validate_against_rust!(2, 8, 4);
6244 validate_against_rust!(2, 16);
6245 validate_against_rust!(2, 16, 0);
6246 validate_against_rust!(2, 16, 1);
6247 validate_against_rust!(2, 16, 2);
6248 validate_against_rust!(2, 16, 3);
6249 validate_against_rust!(2, 16, 4);
6250 validate_against_rust!(3, 1);
6251 validate_against_rust!(3, 1, 0);
6252 validate_against_rust!(3, 1, 1);
6253 validate_against_rust!(3, 1, 2);
6254 validate_against_rust!(3, 1, 3);
6255 validate_against_rust!(3, 1, 4);
6256 validate_against_rust!(3, 2);
6257 validate_against_rust!(3, 2, 0);
6258 validate_against_rust!(3, 2, 1);
6259 validate_against_rust!(3, 2, 2);
6260 validate_against_rust!(3, 2, 3);
6261 validate_against_rust!(3, 2, 4);
6262 validate_against_rust!(3, 4);
6263 validate_against_rust!(3, 4, 0);
6264 validate_against_rust!(3, 4, 1);
6265 validate_against_rust!(3, 4, 2);
6266 validate_against_rust!(3, 4, 3);
6267 validate_against_rust!(3, 4, 4);
6268 validate_against_rust!(3, 8);
6269 validate_against_rust!(3, 8, 0);
6270 validate_against_rust!(3, 8, 1);
6271 validate_against_rust!(3, 8, 2);
6272 validate_against_rust!(3, 8, 3);
6273 validate_against_rust!(3, 8, 4);
6274 validate_against_rust!(3, 16);
6275 validate_against_rust!(3, 16, 0);
6276 validate_against_rust!(3, 16, 1);
6277 validate_against_rust!(3, 16, 2);
6278 validate_against_rust!(3, 16, 3);
6279 validate_against_rust!(3, 16, 4);
6280 validate_against_rust!(4, 1);
6281 validate_against_rust!(4, 1, 0);
6282 validate_against_rust!(4, 1, 1);
6283 validate_against_rust!(4, 1, 2);
6284 validate_against_rust!(4, 1, 3);
6285 validate_against_rust!(4, 1, 4);
6286 validate_against_rust!(4, 2);
6287 validate_against_rust!(4, 2, 0);
6288 validate_against_rust!(4, 2, 1);
6289 validate_against_rust!(4, 2, 2);
6290 validate_against_rust!(4, 2, 3);
6291 validate_against_rust!(4, 2, 4);
6292 validate_against_rust!(4, 4);
6293 validate_against_rust!(4, 4, 0);
6294 validate_against_rust!(4, 4, 1);
6295 validate_against_rust!(4, 4, 2);
6296 validate_against_rust!(4, 4, 3);
6297 validate_against_rust!(4, 4, 4);
6298 validate_against_rust!(4, 8);
6299 validate_against_rust!(4, 8, 0);
6300 validate_against_rust!(4, 8, 1);
6301 validate_against_rust!(4, 8, 2);
6302 validate_against_rust!(4, 8, 3);
6303 validate_against_rust!(4, 8, 4);
6304 validate_against_rust!(4, 16);
6305 validate_against_rust!(4, 16, 0);
6306 validate_against_rust!(4, 16, 1);
6307 validate_against_rust!(4, 16, 2);
6308 validate_against_rust!(4, 16, 3);
6309 validate_against_rust!(4, 16, 4);
6310 }
6311
6312 #[test]
6313 fn test_known_layout() {
6314 // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
6315 // Test that `PhantomData<$ty>` has the same layout as `()` regardless
6316 // of `$ty`.
6317 macro_rules! test {
6318 ($ty:ty, $expect:expr) => {
6319 let expect = $expect;
6320 assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
6321 assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
6322 assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
6323 };
6324 }
6325
6326 let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
6327 align: NonZeroUsize::new(align).unwrap(),
6328 size_info: match _trailing_slice_elem_size {
6329 None => SizeInfo::Sized { _size: offset },
6330 Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
6331 _offset: offset,
6332 _elem_size: elem_size,
6333 }),
6334 },
6335 };
6336
6337 test!((), layout(0, 1, None));
6338 test!(u8, layout(1, 1, None));
6339 // Use `align_of` because `u64` alignment may be smaller than 8 on some
6340 // platforms.
6341 test!(u64, layout(8, mem::align_of::<u64>(), None));
6342 test!(AU64, layout(8, 8, None));
6343
6344 test!(Option<&'static ()>, usize::LAYOUT);
6345
6346 test!([()], layout(0, 1, Some(0)));
6347 test!([u8], layout(0, 1, Some(1)));
6348 test!(str, layout(0, 1, Some(1)));
6349 }
6350
6351 #[cfg(feature = "derive")]
6352 #[test]
6353 fn test_known_layout_derive() {
6354 // In this and other files (`late_compile_pass.rs`,
6355 // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
6356 // modes of `derive(KnownLayout)` for the following combination of
6357 // properties:
6358 //
6359 // +------------+--------------------------------------+-----------+
6360 // | | trailing field properties | |
6361 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6362 // |------------+----------+----------------+----------+-----------|
6363 // | N | N | N | N | KL00 |
6364 // | N | N | N | Y | KL01 |
6365 // | N | N | Y | N | KL02 |
6366 // | N | N | Y | Y | KL03 |
6367 // | N | Y | N | N | KL04 |
6368 // | N | Y | N | Y | KL05 |
6369 // | N | Y | Y | N | KL06 |
6370 // | N | Y | Y | Y | KL07 |
6371 // | Y | N | N | N | KL08 |
6372 // | Y | N | N | Y | KL09 |
6373 // | Y | N | Y | N | KL10 |
6374 // | Y | N | Y | Y | KL11 |
6375 // | Y | Y | N | N | KL12 |
6376 // | Y | Y | N | Y | KL13 |
6377 // | Y | Y | Y | N | KL14 |
6378 // | Y | Y | Y | Y | KL15 |
6379 // +------------+----------+----------------+----------+-----------+
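        //
        // The derives below are the combinations for which
        // `derive(KnownLayout)` succeeds; the failing combinations are
        // exercised by the compile-fail tests named above.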
6380
6381 struct NotKnownLayout<T = ()> {
6382 _t: T,
6383 }
6384
6385 #[derive(KnownLayout)]
6386 #[repr(C)]
6387 struct AlignSize<const ALIGN: usize, const SIZE: usize>
6388 where
6389 elain::Align<ALIGN>: elain::Alignment,
6390 {
6391 _align: elain::Align<ALIGN>,
6392 _size: [u8; SIZE],
6393 }
6394
6395 type AU16 = AlignSize<2, 2>;
6396 type AU32 = AlignSize<4, 4>;
6397
6398 fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}
6399
6400 let sized_layout = |align, size| DstLayout {
6401 align: NonZeroUsize::new(align).unwrap(),
6402 size_info: SizeInfo::Sized { _size: size },
6403 };
6404
6405 let unsized_layout = |align, elem_size, offset| DstLayout {
6406 align: NonZeroUsize::new(align).unwrap(),
6407 size_info: SizeInfo::SliceDst(TrailingSliceLayout {
6408 _offset: offset,
6409 _elem_size: elem_size,
6410 }),
6411 };
6412
6413 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6414 // | N | N | N | Y | KL01 |
6415 #[derive(KnownLayout)]
6416 struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6417
6418 let expected = DstLayout::for_type::<KL01>();
6419
6420 assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
6421 assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));
6422
6423 // ...with `align(N)`:
6424 #[derive(KnownLayout)]
6425 #[repr(align(64))]
6426 struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6427
6428 let expected = DstLayout::for_type::<KL01Align>();
6429
6430 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
6431 assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6432
6433 // ...with `packed`:
6434 #[derive(KnownLayout)]
6435 #[repr(packed)]
6436 struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6437
6438 let expected = DstLayout::for_type::<KL01Packed>();
6439
6440 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
6441 assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));
6442
6443 // ...with `packed(N)`:
6444 #[derive(KnownLayout)]
6445 #[repr(packed(2))]
6446 struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);
6447
6448 assert_impl_all!(KL01PackedN: KnownLayout);
6449
6450 let expected = DstLayout::for_type::<KL01PackedN>();
6451
6452 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
6453 assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6454
6455 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6456 // | N | N | Y | Y | KL03 |
6457 #[derive(KnownLayout)]
6458 struct KL03(NotKnownLayout, u8);
6459
6460 let expected = DstLayout::for_type::<KL03>();
6461
6462 assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
6463 assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));
6464
6465 // ... with `align(N)`
6466 #[derive(KnownLayout)]
6467 #[repr(align(64))]
6468 struct KL03Align(NotKnownLayout<AU32>, u8);
6469
6470 let expected = DstLayout::for_type::<KL03Align>();
6471
6472 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
6473 assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6474
6475 // ... with `packed`:
6476 #[derive(KnownLayout)]
6477 #[repr(packed)]
6478 struct KL03Packed(NotKnownLayout<AU32>, u8);
6479
6480 let expected = DstLayout::for_type::<KL03Packed>();
6481
6482 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
6483 assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));
6484
6485 // ... with `packed(N)`
6486 #[derive(KnownLayout)]
6487 #[repr(packed(2))]
6488 struct KL03PackedN(NotKnownLayout<AU32>, u8);
6489
6490 assert_impl_all!(KL03PackedN: KnownLayout);
6491
6492 let expected = DstLayout::for_type::<KL03PackedN>();
6493
6494 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
6495 assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));
6496
6497 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6498 // | N | Y | N | Y | KL05 |
6499 #[derive(KnownLayout)]
6500 struct KL05<T>(u8, T);
6501
6502 fn _test_kl05<T>(t: T) -> impl KnownLayout {
6503 KL05(0u8, t)
6504 }
6505
6506 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6507 // | N | Y | Y | Y | KL07 |
6508 #[derive(KnownLayout)]
6509 struct KL07<T: KnownLayout>(u8, T);
6510
6511 fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
6512 let _ = KL07(0u8, t);
6513 }
6514
6515 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6516 // | Y | N | Y | N | KL10 |
6517 #[derive(KnownLayout)]
6518 #[repr(C)]
6519 struct KL10(NotKnownLayout<AU32>, [u8]);
6520
6521 let expected = DstLayout::new_zst(None)
6522 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6523 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6524 .pad_to_align();
6525
6526 assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
6527 assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));
6528
6529 // ...with `align(N)`:
6530 #[derive(KnownLayout)]
6531 #[repr(C, align(64))]
6532 struct KL10Align(NotKnownLayout<AU32>, [u8]);
6533
6534 let repr_align = NonZeroUsize::new(64);
6535
6536 let expected = DstLayout::new_zst(repr_align)
6537 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
6538 .extend(<[u8] as KnownLayout>::LAYOUT, None)
6539 .pad_to_align();
6540
6541 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
6542 assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));
6543
6544 // ...with `packed`:
6545 #[derive(KnownLayout)]
6546 #[repr(C, packed)]
6547 struct KL10Packed(NotKnownLayout<AU32>, [u8]);
6548
6549 let repr_packed = NonZeroUsize::new(1);
6550
6551 let expected = DstLayout::new_zst(None)
6552 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6553 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6554 .pad_to_align();
6555
6556 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
6557 assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));
6558
6559 // ...with `packed(N)`:
6560 #[derive(KnownLayout)]
6561 #[repr(C, packed(2))]
6562 struct KL10PackedN(NotKnownLayout<AU32>, [u8]);
6563
6564 let repr_packed = NonZeroUsize::new(2);
6565
6566 let expected = DstLayout::new_zst(None)
6567 .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
6568 .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
6569 .pad_to_align();
6570
6571 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
6572 assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6573
6574 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6575 // | Y | N | Y | Y | KL11 |
6576 #[derive(KnownLayout)]
6577 #[repr(C)]
6578 struct KL11(NotKnownLayout<AU64>, u8);
6579
6580 let expected = DstLayout::new_zst(None)
6581 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6582 .extend(<u8 as KnownLayout>::LAYOUT, None)
6583 .pad_to_align();
6584
6585 assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
6586 assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));
6587
6588 // ...with `align(N)`:
6589 #[derive(KnownLayout)]
6590 #[repr(C, align(64))]
6591 struct KL11Align(NotKnownLayout<AU64>, u8);
6592
6593 let repr_align = NonZeroUsize::new(64);
6594
6595 let expected = DstLayout::new_zst(repr_align)
6596 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
6597 .extend(<u8 as KnownLayout>::LAYOUT, None)
6598 .pad_to_align();
6599
6600 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
6601 assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));
6602
6603 // ...with `packed`:
6604 #[derive(KnownLayout)]
6605 #[repr(C, packed)]
6606 struct KL11Packed(NotKnownLayout<AU64>, u8);
6607
6608 let repr_packed = NonZeroUsize::new(1);
6609
6610 let expected = DstLayout::new_zst(None)
6611 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6612 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6613 .pad_to_align();
6614
6615 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
6616 assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));
6617
6618 // ...with `packed(N)`:
6619 #[derive(KnownLayout)]
6620 #[repr(C, packed(2))]
6621 struct KL11PackedN(NotKnownLayout<AU64>, u8);
6622
6623 let repr_packed = NonZeroUsize::new(2);
6624
6625 let expected = DstLayout::new_zst(None)
6626 .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
6627 .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
6628 .pad_to_align();
6629
6630 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
6631 assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));
6632
6633 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6634 // | Y | Y | Y | N | KL14 |
6635 #[derive(KnownLayout)]
6636 #[repr(C)]
6637 struct KL14<T: ?Sized + KnownLayout>(u8, T);
6638
6639 fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
6640 _assert_kl(kl)
6641 }
6642
6643 // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
6644 // | Y | Y | Y | Y | KL15 |
6645 #[derive(KnownLayout)]
6646 #[repr(C)]
6647 struct KL15<T: KnownLayout>(u8, T);
6648
6649 fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
6650 let _ = KL15(0u8, t);
6651 }
6652
6653 // Test a variety of combinations of field types:
6654 // - ()
6655 // - u8
6656 // - AU16
6657 // - [()]
6658 // - [u8]
6659 // - [AU16]
6660
6661 #[allow(clippy::upper_case_acronyms)]
6662 #[derive(KnownLayout)]
6663 #[repr(C)]
6664 struct KLTU<T, U: ?Sized>(T, U);
6665
6666 assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));
6667
6668 assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6669
6670 assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6671
6672 assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));
6673
6674 assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6675
6676 assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));
6677
6678 assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));
6679
6680 assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));
6681
6682 assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6683
6684 assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));
6685
6686 assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6687
6688 assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6689
6690 assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));
6691
6692 assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6693
6694 assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));
6695
6696 assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));
6697
6698 assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));
6699
6700 assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));
6701
6702 // Test a variety of field counts.
6703
6704 #[derive(KnownLayout)]
6705 #[repr(C)]
6706 struct KLF0;
6707
6708 assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));
6709
6710 #[derive(KnownLayout)]
6711 #[repr(C)]
6712 struct KLF1([u8]);
6713
6714 assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));
6715
6716 #[derive(KnownLayout)]
6717 #[repr(C)]
6718 struct KLF2(NotKnownLayout<u8>, [u8]);
6719
6720 assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));
6721
6722 #[derive(KnownLayout)]
6723 #[repr(C)]
6724 struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);
6725
6726 assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));
6727
6728 #[derive(KnownLayout)]
6729 #[repr(C)]
6730 struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);
6731
6732 assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
6733 }
6734
6735 #[test]
6736 fn test_object_safety() {
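        // If any of these traits were not object safe, the `dyn Trait`
        // parameters below would fail to compile.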
6737 fn _takes_from_zeroes(_: &dyn FromZeroes) {}
6738 fn _takes_from_bytes(_: &dyn FromBytes) {}
6739 fn _takes_unaligned(_: &dyn Unaligned) {}
6740 }
6741
6742 #[test]
6743 fn test_from_zeroes_only() {
6744 // Test types that implement `FromZeroes` but not `FromBytes`.
6745
6746 assert!(!bool::new_zeroed());
6747 assert_eq!(char::new_zeroed(), '\0');
6748
6749 #[cfg(feature = "alloc")]
6750 {
6751 assert_eq!(bool::new_box_zeroed(), Box::new(false));
6752 assert_eq!(char::new_box_zeroed(), Box::new('\0'));
6753
6754 assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
6755 assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);
6756
6757 assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
6758 assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
6759 }
6760
6761 let mut string = "hello".to_string();
6762 let s: &mut str = string.as_mut();
6763 assert_eq!(s, "hello");
6764 s.zero();
6765 assert_eq!(s, "\0\0\0\0\0");
6766 }
6767
6768 #[test]
6769 fn test_read_write() {
6770 const VAL: u64 = 0x12345678;
6771 #[cfg(target_endian = "big")]
6772 const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
6773 #[cfg(target_endian = "little")]
6774 const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
6775
6776 // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.
6777
6778 assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
6779 // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
6780 // zeroes.
6781 let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6782 assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
6783 assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
        // The first 8 bytes are all zeroes and the second 8 bytes are from
        // `VAL_BYTES`.
6786 let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6787 assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
6788 assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
6789
6790 // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`.
6791
6792 let mut bytes = [0u8; 8];
6793 assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
6794 assert_eq!(bytes, VAL_BYTES);
6795 let mut bytes = [0u8; 16];
6796 assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
6797 let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
6798 assert_eq!(bytes, want);
6799 let mut bytes = [0u8; 16];
6800 assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
6801 let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
6802 assert_eq!(bytes, want);
6803 }
6804
6805 #[test]
6806 fn test_transmute() {
6807 // Test that memory is transmuted as expected.
6808 let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6809 let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6810 let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
6811 assert_eq!(x, array_of_arrays);
6812 let x: [u8; 8] = transmute!(array_of_arrays);
6813 assert_eq!(x, array_of_u8s);
6814
6815 // Test that the source expression's value is forgotten rather than
6816 // dropped.
6817 #[derive(AsBytes)]
6818 #[repr(transparent)]
6819 struct PanicOnDrop(());
6820 impl Drop for PanicOnDrop {
6821 fn drop(&mut self) {
6822 panic!("PanicOnDrop::drop");
6823 }
6824 }
6825 #[allow(clippy::let_unit_value)]
6826 let _: () = transmute!(PanicOnDrop(()));
6827
6828 // Test that `transmute!` is legal in a const context.
6829 const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
6830 const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
6831 const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
6832 assert_eq!(X, ARRAY_OF_ARRAYS);
6833 }
6834
6835 #[test]
6836 fn test_transmute_ref() {
6837 // Test that memory is transmuted as expected.
6838 let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6839 let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6840 let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s);
6841 assert_eq!(*x, array_of_arrays);
6842 let x: &[u8; 8] = transmute_ref!(&array_of_arrays);
6843 assert_eq!(*x, array_of_u8s);
6844
6845 // Test that `transmute_ref!` is legal in a const context.
6846 const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
6847 const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
6848 #[allow(clippy::redundant_static_lifetimes)]
6849 const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S);
6850 assert_eq!(*X, ARRAY_OF_ARRAYS);
6851
6852 // Test that it's legal to transmute a reference while shrinking the
6853 // lifetime (note that `X` has the lifetime `'static`).
6854 let x: &[u8; 8] = transmute_ref!(X);
6855 assert_eq!(*x, ARRAY_OF_U8S);
6856
6857 // Test that `transmute_ref!` supports decreasing alignment.
6858 let u = AU64(0);
6859 let array = [0, 0, 0, 0, 0, 0, 0, 0];
6860 let x: &[u8; 8] = transmute_ref!(&u);
6861 assert_eq!(*x, array);
6862
6863 // Test that a mutable reference can be turned into an immutable one.
6864 let mut x = 0u8;
6865 #[allow(clippy::useless_transmute)]
6866 let y: &u8 = transmute_ref!(&mut x);
6867 assert_eq!(*y, 0);
6868 }
6869
6870 #[test]
6871 fn test_transmute_mut() {
6872 // Test that memory is transmuted as expected.
6873 let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
6874 let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
6875 let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s);
6876 assert_eq!(*x, array_of_arrays);
6877 let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
6878 assert_eq!(*x, array_of_u8s);
6879
6880 {
6881 // Test that it's legal to transmute a reference while shrinking the
6882 // lifetime.
6883 let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
6884 assert_eq!(*x, array_of_u8s);
6885 }
6886 // Test that `transmute_mut!` supports decreasing alignment.
6887 let mut u = AU64(0);
6888 let array = [0, 0, 0, 0, 0, 0, 0, 0];
6889 let x: &[u8; 8] = transmute_mut!(&mut u);
6890 assert_eq!(*x, array);
6891
6892 // Test that a mutable reference can be turned into an immutable one.
6893 let mut x = 0u8;
6894 #[allow(clippy::useless_transmute)]
6895 let y: &u8 = transmute_mut!(&mut x);
6896 assert_eq!(*y, 0);
6897 }
6898
6899 #[test]
6900 fn test_macros_evaluate_args_once() {
6901 let mut ctr = 0;
6902 let _: usize = transmute!({
6903 ctr += 1;
6904 0usize
6905 });
6906 assert_eq!(ctr, 1);
6907
6908 let mut ctr = 0;
6909 let _: &usize = transmute_ref!({
6910 ctr += 1;
6911 &0usize
6912 });
6913 assert_eq!(ctr, 1);
6914 }
6915
6916 #[test]
6917 fn test_include_value() {
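        // Per the assertions below, `../testdata/include_value/data` contains
        // exactly the four ASCII bytes `abcd`.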
6918 const AS_U32: u32 = include_value!("../testdata/include_value/data");
6919 assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
6920 const AS_I32: i32 = include_value!("../testdata/include_value/data");
6921 assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
6922 }
6923
6924 #[test]
6925 fn test_address() {
6926 // Test that the `Deref` and `DerefMut` implementations return a
6927 // reference which points to the right region of memory.
6928
6929 let buf = [0];
6930 let r = Ref::<_, u8>::new(&buf[..]).unwrap();
6931 let buf_ptr = buf.as_ptr();
6932 let deref_ptr: *const u8 = r.deref();
6933 assert_eq!(buf_ptr, deref_ptr);
6934
6935 let buf = [0];
6936 let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
6937 let buf_ptr = buf.as_ptr();
6938 let deref_ptr = r.deref().as_ptr();
6939 assert_eq!(buf_ptr, deref_ptr);
6940 }
6941
6942 // Verify that values written to a `Ref` are properly shared between the
6943 // typed and untyped representations, that reads via `deref` and `read`
6944 // behave the same, and that writes via `deref_mut` and `write` behave the
6945 // same.
6946 fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
        // Assert that the value starts out zeroed.
6948 assert_eq!(*r, AU64(0));
6949 assert_eq!(r.read(), AU64(0));
6950
6951 // Assert that values written to the typed value are reflected in the
6952 // byte slice.
6953 const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
6954 *r = VAL1;
6955 assert_eq!(r.bytes(), &VAL1.to_bytes());
6956 *r = AU64(0);
6957 r.write(VAL1);
6958 assert_eq!(r.bytes(), &VAL1.to_bytes());
6959
6960 // Assert that values written to the byte slice are reflected in the
6961 // typed value.
6962 const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
6963 r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
6964 assert_eq!(*r, VAL2);
6965 assert_eq!(r.read(), VAL2);
6966 }
6967
6968 // Verify that values written to a `Ref` are properly shared between the
6969 // typed and untyped representations; pass a value with `typed_len` `AU64`s
6970 // backed by an array of `typed_len * 8` bytes.
6971 fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
6972 // Assert that the value starts out zeroed.
6973 assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
6974
6975 // Check the backing storage is the exact same slice.
6976 let untyped_len = typed_len * 8;
6977 assert_eq!(r.bytes().len(), untyped_len);
6978 assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
6979
6980 // Assert that values written to the typed value are reflected in the
6981 // byte slice.
6982 const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
6983 for typed in &mut *r {
6984 *typed = VAL1;
6985 }
6986 assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());
6987
6988 // Assert that values written to the byte slice are reflected in the
6989 // typed value.
6990 const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1
6991 r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
6992 assert!(r.iter().copied().all(|x| x == VAL2));
6993 }
6994
6995 // Verify that values written to a `Ref` are properly shared between the
6996 // typed and untyped representations, that reads via `deref` and `read`
6997 // behave the same, and that writes via `deref_mut` and `write` behave the
6998 // same.
6999 fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
        // Assert that the value starts out zeroed.
7001 assert_eq!(*r, [0; 8]);
7002 assert_eq!(r.read(), [0; 8]);
7003
7004 // Assert that values written to the typed value are reflected in the
7005 // byte slice.
7006 const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
7007 *r = VAL1;
7008 assert_eq!(r.bytes(), &VAL1);
7009 *r = [0; 8];
7010 r.write(VAL1);
7011 assert_eq!(r.bytes(), &VAL1);
7012
7013 // Assert that values written to the byte slice are reflected in the
7014 // typed value.
7015 const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
7016 r.bytes_mut().copy_from_slice(&VAL2[..]);
7017 assert_eq!(*r, VAL2);
7018 assert_eq!(r.read(), VAL2);
7019 }
7020
7021 // Verify that values written to a `Ref` are properly shared between the
7022 // typed and untyped representations; pass a value with `len` `u8`s backed
7023 // by an array of `len` bytes.
7024 fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
7025 // Assert that the value starts out zeroed.
7026 assert_eq!(&*r, vec![0u8; len].as_slice());
7027
7028 // Check the backing storage is the exact same slice.
7029 assert_eq!(r.bytes().len(), len);
7030 assert_eq!(r.bytes().as_ptr(), r.as_ptr());
7031
7032 // Assert that values written to the typed value are reflected in the
7033 // byte slice.
7034 let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
7035 r.copy_from_slice(&expected_bytes);
7036 assert_eq!(r.bytes(), expected_bytes.as_slice());
7037
7038 // Assert that values written to the byte slice are reflected in the
7039 // typed value.
7040 for byte in &mut expected_bytes {
            *byte = !*byte; // different from the previous `expected_bytes`
7042 }
7043 r.bytes_mut().copy_from_slice(&expected_bytes);
7044 assert_eq!(&*r, expected_bytes.as_slice());
7045 }
7046
7047 #[test]
7048 fn test_new_aligned_sized() {
7049 // Test that a properly-aligned, properly-sized buffer works for new,
7050 // new_from_prefix, and new_from_suffix, and that new_from_prefix and
7051 // new_from_suffix return empty slices. Test that a properly-aligned
7052 // buffer whose length is a multiple of the element size works for
7053 // new_slice. Test that xxx_zeroed behaves the same, and zeroes the
7054 // memory.
7055
7056 // A buffer with an alignment of 8.
7057 let mut buf = Align::<[u8; 8], AU64>::default();
7058 // `buf.t` should be aligned to 8, so this should always succeed.
7059 test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
7060 let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap();
7061 buf.t = ascending;
7062 test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
7063 {
7064 // In a block so that `r` and `suffix` don't live too long.
7065 buf.set_default();
7066 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
7067 assert!(suffix.is_empty());
7068 test_new_helper(r);
7069 }
7070 {
7071 buf.t = ascending;
7072 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
7073 assert!(suffix.is_empty());
7074 test_new_helper(r);
7075 }
7076 {
7077 buf.set_default();
7078 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
7079 assert!(prefix.is_empty());
7080 test_new_helper(r);
7081 }
7082 {
7083 buf.t = ascending;
7084 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
7085 assert!(prefix.is_empty());
7086 test_new_helper(r);
7087 }
7088
7089 // A buffer with alignment 8 and length 24. We choose this length very
7090 // intentionally: if we instead used length 16, then the prefix and
7091 // suffix lengths would be identical. In the past, we used length 16,
7092 // which resulted in this test failing to discover the bug uncovered in
7093 // #506.
7094 let mut buf = Align::<[u8; 24], AU64>::default();
7095 // `buf.t` should be aligned to 8 and have a length which is a multiple
7096 // of `size_of::<AU64>()`, so this should always succeed.
7097 test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3);
7098 let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
7099 // 16 ascending bytes followed by 8 zeros.
7100 let mut ascending_prefix = ascending;
7101 ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7102 // 8 zeros followed by 16 ascending bytes.
7103 let mut ascending_suffix = ascending;
7104 ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7105 test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3);
7106
7107 {
7108 buf.t = ascending_suffix;
7109 let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
7110 assert_eq!(suffix, &ascending[8..]);
7111 test_new_helper_slice(r, 1);
7112 }
7113 {
7114 buf.t = ascending_suffix;
7115 let (r, suffix) =
7116 Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
7117 assert_eq!(suffix, &ascending[8..]);
7118 test_new_helper_slice(r, 1);
7119 }
7120 {
7121 buf.t = ascending_prefix;
7122 let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
7123 assert_eq!(prefix, &ascending[..16]);
7124 test_new_helper_slice(r, 1);
7125 }
7126 {
7127 buf.t = ascending_prefix;
7128 let (prefix, r) =
7129 Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
7130 assert_eq!(prefix, &ascending[..16]);
7131 test_new_helper_slice(r, 1);
7132 }
7133 }
7134
7135 #[test]
7136 fn test_new_unaligned_sized() {
7137 // Test that an unaligned, properly-sized buffer works for
7138 // `new_unaligned`, `new_unaligned_from_prefix`, and
        // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
        // and `new_unaligned_from_suffix` return empty slices. Test that an
        // unaligned buffer whose length is a multiple of the element size
        // works for `new_slice_unaligned`. Test that `xxx_zeroed` behaves the
        // same, and zeroes the memory.
7144
7145 let mut buf = [0u8; 8];
7146 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
7147 buf = [0xFFu8; 8];
7148 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
7149 {
7150 // In a block so that `r` and `suffix` don't live too long.
7151 buf = [0u8; 8];
7152 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7153 assert!(suffix.is_empty());
7154 test_new_helper_unaligned(r);
7155 }
7156 {
7157 buf = [0xFFu8; 8];
7158 let (r, suffix) =
7159 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7160 assert!(suffix.is_empty());
7161 test_new_helper_unaligned(r);
7162 }
7163 {
7164 buf = [0u8; 8];
7165 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7166 assert!(prefix.is_empty());
7167 test_new_helper_unaligned(r);
7168 }
7169 {
7170 buf = [0xFFu8; 8];
7171 let (prefix, r) =
7172 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7173 assert!(prefix.is_empty());
7174 test_new_helper_unaligned(r);
7175 }
7176
7177 let mut buf = [0u8; 16];
        // `[u8]` has no alignment requirement and any length is valid for
        // `[u8]`, so this should always succeed.
7180 test_new_helper_slice_unaligned(
7181 Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
7182 16,
7183 );
7184 buf = [0xFFu8; 16];
7185 test_new_helper_slice_unaligned(
7186 Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
7187 16,
7188 );
7189
7190 {
7191 buf = [0u8; 16];
7192 let (r, suffix) =
7193 Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
7194 assert_eq!(suffix, [0; 8]);
7195 test_new_helper_slice_unaligned(r, 8);
7196 }
7197 {
7198 buf = [0xFFu8; 16];
7199 let (r, suffix) =
7200 Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
7201 assert_eq!(suffix, [0xFF; 8]);
7202 test_new_helper_slice_unaligned(r, 8);
7203 }
7204 {
7205 buf = [0u8; 16];
7206 let (prefix, r) =
7207 Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
7208 assert_eq!(prefix, [0; 8]);
7209 test_new_helper_slice_unaligned(r, 8);
7210 }
7211 {
7212 buf = [0xFFu8; 16];
7213 let (prefix, r) =
7214 Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
7215 assert_eq!(prefix, [0xFF; 8]);
7216 test_new_helper_slice_unaligned(r, 8);
7217 }
7218 }
7219
7220 #[test]
7221 fn test_new_oversized() {
7222 // Test that a properly-aligned, overly-sized buffer works for
7223 // `new_from_prefix` and `new_from_suffix`, and that they return the
7224 // remainder and prefix of the slice respectively. Test that
7225 // `xxx_zeroed` behaves the same, and zeroes the memory.
7226
7227 let mut buf = Align::<[u8; 16], AU64>::default();
7228 {
7229 // In a block so that `r` and `suffix` don't live too long. `buf.t`
7230 // should be aligned to 8, so this should always succeed.
7231 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
7232 assert_eq!(suffix.len(), 8);
7233 test_new_helper(r);
7234 }
7235 {
7236 buf.t = [0xFFu8; 16];
7237 // `buf.t` should be aligned to 8, so this should always succeed.
7238 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
7239 // Assert that the suffix wasn't zeroed.
7240 assert_eq!(suffix, &[0xFFu8; 8]);
7241 test_new_helper(r);
7242 }
7243 {
7244 buf.set_default();
7245 // `buf.t` should be aligned to 8, so this should always succeed.
7246 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
7247 assert_eq!(prefix.len(), 8);
7248 test_new_helper(r);
7249 }
7250 {
7251 buf.t = [0xFFu8; 16];
7252 // `buf.t` should be aligned to 8, so this should always succeed.
7253 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
7254 // Assert that the prefix wasn't zeroed.
7255 assert_eq!(prefix, &[0xFFu8; 8]);
7256 test_new_helper(r);
7257 }
7258 }
7259
7260 #[test]
7261 fn test_new_unaligned_oversized() {
7262 // Test than an unaligned, overly-sized buffer works for
7263 // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
7264 // they return the remainder and prefix of the slice respectively. Test
7265 // that `xxx_zeroed` behaves the same, and zeroes the memory.
7266
7267 let mut buf = [0u8; 16];
7268 {
7269 // In a block so that `r` and `suffix` don't live too long.
7270 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7271 assert_eq!(suffix.len(), 8);
7272 test_new_helper_unaligned(r);
7273 }
7274 {
7275 buf = [0xFFu8; 16];
7276 let (r, suffix) =
7277 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7278 // Assert that the suffix wasn't zeroed.
7279 assert_eq!(suffix, &[0xFF; 8]);
7280 test_new_helper_unaligned(r);
7281 }
7282 {
7283 buf = [0u8; 16];
7284 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7285 assert_eq!(prefix.len(), 8);
7286 test_new_helper_unaligned(r);
7287 }
7288 {
7289 buf = [0xFFu8; 16];
7290 let (prefix, r) =
7291 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7292 // Assert that the prefix wasn't zeroed.
7293 assert_eq!(prefix, &[0xFF; 8]);
7294 test_new_helper_unaligned(r);
7295 }
7296 }
7297
7298 #[test]
7299 fn test_ref_from_mut_from() {
        // Test the `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success
        // cases. Exhaustive coverage for these methods is provided by the
        // `Ref` tests above, which these helper methods defer to.
7303
7304 let mut buf =
7305 Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
7306
7307 assert_eq!(
7308 AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
7309 [8, 9, 10, 11, 12, 13, 14, 15]
7310 );
7311 let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
7312 suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` type is deliberately not half the size of the 16-byte
        // buffer: if `from_suffix` shared a buggy implementation with
        // `from_prefix`, a half-sized chunk would split the buffer at the same
        // index either way and mask the bug (issues #506, #511).
7315 assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]);
7316 let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
7317 suffix.0 = 0x0202020202020202;
7318 <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42;
7319 assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]);
7320 <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30;
7321 assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
7322 }
7323
7324 #[test]
7325 fn test_ref_from_mut_from_error() {
7326 // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
7327
7328 // Fail because the buffer is too large.
7329 let mut buf = Align::<[u8; 16], AU64>::default();
7330 // `buf.t` should be aligned to 8, so only the length check should fail.
7331 assert!(AU64::ref_from(&buf.t[..]).is_none());
7332 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7333 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7334 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7335
7336 // Fail because the buffer is too small.
7337 let mut buf = Align::<[u8; 4], AU64>::default();
7338 assert!(AU64::ref_from(&buf.t[..]).is_none());
7339 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7340 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7341 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7342 assert!(AU64::ref_from_prefix(&buf.t[..]).is_none());
7343 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none());
7344 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7345 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7346 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none());
7347 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none());
7348 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none());
7349 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none());
7350
7351 // Fail because the alignment is insufficient.
7352 let mut buf = Align::<[u8; 13], AU64>::default();
7353 assert!(AU64::ref_from(&buf.t[1..]).is_none());
7354 assert!(AU64::mut_from(&mut buf.t[1..]).is_none());
7357 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none());
7358 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none());
7359 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7360 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7361 }
7362
7363 #[test]
7364 #[allow(clippy::cognitive_complexity)]
7365 fn test_new_error() {
7366 // Fail because the buffer is too large.
7367
7368 // A buffer with an alignment of 8.
7369 let mut buf = Align::<[u8; 16], AU64>::default();
7370 // `buf.t` should be aligned to 8, so only the length check should fail.
7371 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7372 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7373 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7374 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7375
7376 // Fail because the buffer is too small.
7377
7378 // A buffer with an alignment of 8.
7379 let mut buf = Align::<[u8; 4], AU64>::default();
7380 // `buf.t` should be aligned to 8, so only the length check should fail.
7381 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7382 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7383 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7384 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7385 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
7386 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
7387 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7388 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7389 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
7390 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
7391 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
7392 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
7393
7394 // Fail because the length is not a multiple of the element size.
7395
7396 let mut buf = Align::<[u8; 12], AU64>::default();
7397 // `buf.t` has length 12, but element size is 8.
7398 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
7399 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
7400 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
7401 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
7402
7403 // Fail because the buffer is too short.
7404 let mut buf = Align::<[u8; 12], AU64>::default();
7405 // `buf.t` has length 12, but the element size is 8 (and we're expecting
7406 // two of them).
7407 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
7408 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
7409 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
7410 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
7411 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
7412 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
7413 .is_none());
7414 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
7415 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
7416 .is_none());
7417
7418 // Fail because the alignment is insufficient.
7419
7420 // A buffer with an alignment of 8. An odd buffer size is chosen so that
7421 // the last byte of the buffer has odd alignment.
7422 let mut buf = Align::<[u8; 13], AU64>::default();
7423 // Slicing from 1, we get a buffer with size 12 (so the length check
7424 // should succeed) but an alignment of only 1, which is insufficient.
7425 assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
7426 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
7427 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
7428 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
7429 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
7430 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
7431 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
7432 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
7433 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
7434 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
7435 // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use
7436 // the suffix of the slice, which has odd alignment.
7437 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7438 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7439
7440 // Fail due to arithmetic overflow.
7441
7442 let mut buf = Align::<[u8; 16], AU64>::default();
7443 let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
7444 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
7445 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
7446 .is_none());
7447 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
7448 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
7449 .is_none());
7450 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
7451 .is_none());
7452 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
7453 &mut buf.t[..],
7454 unreasonable_len
7455 )
7456 .is_none());
7457 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
7458 .is_none());
7459 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
7460 &mut buf.t[..],
7461 unreasonable_len
7462 )
7463 .is_none());
7464 }
7465
7466 // Tests for ensuring that, if a ZST is passed into a slice-like function,
7467 // we always panic. Since these tests need to be separate per-function, and
7468 // they tend to take up a lot of space, we generate them using a macro in a
7469 // submodule instead. The submodule ensures that we can just re-use the name
7470 // of the function under test for the name of the test itself.
7471 mod test_zst_panics {
7472 macro_rules! zst_test {
7473 ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
7474 #[test]
7475 #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
7476 fn $name() {
7477 let mut buffer = [0u8];
7478 let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
7479 unreachable!("should have panicked, got {:?}", r);
7480 }
7481 }
7482 }
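
        // For reference, `zst_test!(new_slice(), "new_slice")` expands to
        // approximately the following (a sketch, not the literal expansion):
        //
        //     #[test]
        //     #[should_panic = "Ref::new_slice called on a zero-sized type"]
        //     fn new_slice() {
        //         let mut buffer = [0u8];
        //         let r = crate::Ref::<_, [()]>::new_slice(&mut buffer[..]);
        //         unreachable!("should have panicked, got {:?}", r);
        //     }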
7483 zst_test!(new_slice(), "new_slice");
7484 zst_test!(new_slice_zeroed(), "new_slice");
7485 zst_test!(new_slice_from_prefix(1), "new_slice");
7486 zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
7487 zst_test!(new_slice_from_suffix(1), "new_slice");
7488 zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
7489 zst_test!(new_slice_unaligned(), "new_slice_unaligned");
7490 zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
7491 zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
7492 zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
7493 zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
7494 zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
7495 }
7496
7497 #[test]
7498 fn test_as_bytes_methods() {
7499 /// Run a series of tests by calling `AsBytes` methods on `t`.
7500 ///
7501 /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
7502 /// before `t` has been modified. `post_mutation` is the expected
7503 /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]`
7504 /// has had its bits flipped (by applying `^= 0xFF`).
7505 ///
7506 /// `N` is the size of `t` in bytes.
7507 fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
7508 t: &mut T,
7509 bytes: &[u8],
7510 post_mutation: &T,
7511 ) {
7512 // Test that we can access the underlying bytes, and that we get the
7513 // right bytes and the right number of bytes.
7514 assert_eq!(t.as_bytes(), bytes);
7515
7516 // Test that changes to the underlying byte slices are reflected in
7517 // the original object.
7518 t.as_bytes_mut()[0] ^= 0xFF;
7519 assert_eq!(t, post_mutation);
7520 t.as_bytes_mut()[0] ^= 0xFF;
7521
7522 // `write_to` rejects slices that are too small or too large.
7523 assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
7524 assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
7525
7526 // `write_to` works as expected.
7527 let mut bytes = [0; N];
7528 assert_eq!(t.write_to(&mut bytes[..]), Some(()));
7529 assert_eq!(bytes, t.as_bytes());
7530
7531 // `write_to_prefix` rejects slices that are too small.
7532 assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
7533
7534 // `write_to_prefix` works with exact-sized slices.
7535 let mut bytes = [0; N];
7536 assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
7537 assert_eq!(bytes, t.as_bytes());
7538
7539 // `write_to_prefix` works with too-large slices, and any bytes past
7540 // the prefix aren't modified.
7541 let mut too_many_bytes = vec![0; N + 1];
7542 too_many_bytes[N] = 123;
7543 assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
7544 assert_eq!(&too_many_bytes[..N], t.as_bytes());
7545 assert_eq!(too_many_bytes[N], 123);
7546
7547 // `write_to_suffix` rejects slices that are too small.
7548 assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
7549
7550 // `write_to_suffix` works with exact-sized slices.
7551 let mut bytes = [0; N];
7552 assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
7553 assert_eq!(bytes, t.as_bytes());
7554
7555 // `write_to_suffix` works with too-large slices, and any bytes
7556 // before the suffix aren't modified.
7557 let mut too_many_bytes = vec![0; N + 1];
7558 too_many_bytes[0] = 123;
7559 assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
7560 assert_eq!(&too_many_bytes[1..], t.as_bytes());
7561 assert_eq!(too_many_bytes[0], 123);
7562 }
7563
7564 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
7565 #[repr(C)]
7566 struct Foo {
7567 a: u32,
7568 b: Wrapping<u32>,
7569 c: Option<NonZeroU32>,
7570 }
7571
7572 let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
7573 vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
7574 } else {
7575 vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
7576 };
7577 let post_mutation_expected_a =
7578 if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
7579 test::<_, 12>(
7580 &mut Foo { a: 1, b: Wrapping(2), c: None },
7581 expected_bytes.as_bytes(),
7582 &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
7583 );
7584 test::<_, 3>(
7585 Unsized::from_mut_slice(&mut [1, 2, 3]),
7586 &[1, 2, 3],
7587 Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
7588 );
7589 }
7590
7591 #[test]
7592 fn test_array() {
7593 #[derive(FromZeroes, FromBytes, AsBytes)]
7594 #[repr(C)]
7595 struct Foo {
7596 a: [u16; 33],
7597 }
7598
7599 let foo = Foo { a: [0xFFFF; 33] };
7600 let expected = [0xFFu8; 66];
7601 assert_eq!(foo.as_bytes(), &expected[..]);
7602 }
7603
7604 #[test]
7605 fn test_display_debug() {
7606 let buf = Align::<[u8; 8], u64>::default();
7607 let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
7608 assert_eq!(format!("{}", r), "0");
7609 assert_eq!(format!("{:?}", r), "Ref(0)");
7610
7611 let buf = Align::<[u8; 8], u64>::default();
7612 let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
7613 assert_eq!(format!("{:?}", r), "Ref([0])");
7614 }
7615
7616 #[test]
7617 fn test_eq() {
7618 let buf1 = 0_u64;
7619 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7620 let buf2 = 0_u64;
7621 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7622 assert_eq!(r1, r2);
7623 }
7624
7625 #[test]
7626 fn test_ne() {
7627 let buf1 = 0_u64;
7628 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7629 let buf2 = 1_u64;
7630 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7631 assert_ne!(r1, r2);
7632 }
7633
7634 #[test]
7635 fn test_ord() {
7636 let buf1 = 0_u64;
7637 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7638 let buf2 = 1_u64;
7639 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7640 assert!(r1 < r2);
7641 }
7642
7643 #[test]
7644 fn test_new_zeroed() {
7645 assert!(!bool::new_zeroed());
7646 assert_eq!(u64::new_zeroed(), 0);
7647 // This test exists in order to exercise unsafe code, especially when
7648 // running under Miri.
7649 #[allow(clippy::unit_cmp)]
7650 {
7651 assert_eq!(<()>::new_zeroed(), ());
7652 }
7653 }
7654
7655 #[test]
7656 fn test_transparent_packed_generic_struct() {
7657 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7658 #[repr(transparent)]
7659 struct Foo<T> {
7660 _t: T,
7661 _phantom: PhantomData<()>,
7662 }
7663
7664 assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
7665 assert_impl_all!(Foo<u8>: Unaligned);
7666
7667 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7668 #[repr(packed)]
7669 struct Bar<T, U> {
7670 _t: T,
7671 _u: U,
7672 }
7673
7674 assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
7675 }
7676
7677 #[test]
7678 fn test_impls() {
7679 use core::borrow::Borrow;
7680
7681 // A type that can supply test cases for testing
7682 // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!`
7683 // must implement this trait; that macro uses it to generate runtime
7684 // tests for `TryFromBytes` impls.
7685 //
7686 // All `T: FromBytes` types are provided with a blanket impl. Other
        // types must implement `TryFromBytesTestable` directly (i.e., using
        // `impl_try_from_bytes_testable!`).
7689 trait TryFromBytesTestable {
7690 fn with_passing_test_cases<F: Fn(&Self)>(f: F);
7691 fn with_failing_test_cases<F: Fn(&[u8])>(f: F);
7692 }
7693
7694 impl<T: FromBytes> TryFromBytesTestable for T {
7695 fn with_passing_test_cases<F: Fn(&Self)>(f: F) {
7696 // Test with a zeroed value.
7697 f(&Self::new_zeroed());
7698
7699 let ffs = {
7700 let mut t = Self::new_zeroed();
7701 let ptr: *mut T = &mut t;
                // SAFETY: `T: FromBytes`, so any initialized sequence of bytes
                // is a valid `T`. `ptr` is derived from `&mut t`, and so is
                // valid for writes of `size_of::<T>()` bytes.
7703 unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
7704 t
7705 };
7706
7707 // Test with a value initialized with 0xFF.
7708 f(&ffs);
7709 }
7710
7711 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {}
7712 }
7713
7714 // Implements `TryFromBytesTestable`.
7715 macro_rules! impl_try_from_bytes_testable {
7716 // Base case for recursion (when the list of types has run out).
7717 (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
7718 // Implements for type(s) with no type parameters.
7719 ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7720 impl TryFromBytesTestable for $ty {
7721 impl_try_from_bytes_testable!(
7722 @methods @success $($success_case),*
7723 $(, @failure $($failure_case),*)?
7724 );
7725 }
7726 impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
7727 };
7728 // Implements for multiple types with no type parameters.
7729 ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
7730 $(
7731 impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
7732 )*
7733 };
7734 // Implements only the methods; caller must invoke this from inside
7735 // an impl block.
7736 (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7737 fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
7738 $(
7739 _f($success_case.borrow());
7740 )*
7741 }
7742
7743 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
7744 $($(
7745 // `unused_qualifications` is spuriously triggered on
7746 // `Option::<Self>::None`.
7747 #[allow(unused_qualifications)]
7748 let case = $failure_case.as_bytes();
7749 _f(case.as_bytes());
7750 )*)?
7751 }
7752 };
7753 }
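
        // As an illustration, the `bool` arm of the invocation below expands
        // to roughly the following (a sketch, eliding the recursive plumbing
        // and lint attributes):
        //
        //     impl TryFromBytesTestable for bool {
        //         fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
        //             _f(true.borrow());
        //             _f(false.borrow());
        //         }
        //         fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
        //             let case = 2u8.as_bytes();
        //             _f(case.as_bytes());
        //             // ...and likewise for `3u8` and `0xFFu8`.
        //         }
        //     }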
7754
7755 // Note that these impls are only for types which are not `FromBytes`.
7756 // `FromBytes` types are covered by a preceding blanket impl.
7757 impl_try_from_bytes_testable!(
7758 bool => @success true, false,
7759 @failure 2u8, 3u8, 0xFFu8;
7760 char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
7761 @failure 0xD800u32, 0xDFFFu32, 0x110000u32;
7762 str => @success "", "hello", "❤️🧡💛💚💙💜",
7763 @failure [0, 159, 146, 150];
7764 [u8] => @success [], [0, 1, 2];
7765 NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
7766 NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
7767 NonZeroUsize, NonZeroIsize
7768 => @success Self::new(1).unwrap(),
7769 // Doing this instead of `0` ensures that we always satisfy
7770 // the size and alignment requirements of `Self` (whereas
7771 // `0` may be any integer type with a different size or
7772 // alignment than some `NonZeroXxx` types).
7773 @failure Option::<Self>::None;
7774 [bool]
7775 => @success [true, false], [false, true],
7776 @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
7777 );
7778
        // Asserts that `$ty` implements each `$trait` and doesn't implement
        // any `!$trait`. Note that all `$trait`s must come before any
        // `!$trait`s.
7781 //
7782 // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success
7783 // and failure cases for `TryFromBytes::is_bit_valid`.
7784 macro_rules! assert_impls {
7785 ($ty:ty: TryFromBytes) => {
7786 <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| {
7787 let c = Ptr::from(val);
7788 // SAFETY:
                    // - Since `val` is a normal reference, `c` is guaranteed to
7790 // be aligned, to point to a single allocation, and to
7791 // have a size which doesn't overflow `isize`.
7792 // - Since `val` is a valid `$ty`, `c`'s referent satisfies
7793 // the bit validity constraints of `is_bit_valid`, which
7794 // are a superset of the bit validity constraints of
7795 // `$ty`.
7796 let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) };
7797 assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val);
7798
7799 // TODO(#5): In addition to testing `is_bit_valid`, test the
7800 // methods built on top of it. This would both allow us to
7801 // test their implementations and actually convert the bytes
7802 // to `$ty`, giving Miri a chance to catch if this is
                    // unsound (i.e., if our `is_bit_valid` impl is buggy).
7804 //
7805 // The following code was tried, but it doesn't work because
7806 // a) some types are not `AsBytes` and, b) some types are
7807 // not `Sized`.
7808 //
7809 // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap();
7810 // assert_eq!(r, &val);
7811 // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap();
7812 // assert_eq!(r, &mut val);
7813 // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap();
7814 // assert_eq!(v, val);
7815 });
7816 #[allow(clippy::as_conversions)]
7817 <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
7818 let res = <$ty as TryFromBytes>::try_from_ref(c);
7819 assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c);
7820 });
7821
7822 #[allow(dead_code)]
7823 const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
7824 };
7825 ($ty:ty: $trait:ident) => {
7826 #[allow(dead_code)]
7827 const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
7828 };
7829 ($ty:ty: !$trait:ident) => {
7830 #[allow(dead_code)]
7831 const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
7832 };
7833 ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
7834 $(
7835 assert_impls!($ty: $trait);
7836 )*
7837
7838 $(
7839 assert_impls!($ty: !$negative_trait);
7840 )*
7841 };
7842 }
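
        // For example, `assert_impls!(u16: AsBytes, !Unaligned)` expands
        // (roughly) to:
        //
        //     const _: () = { static_assertions::assert_impl_all!(u16: AsBytes); };
        //     const _: () = { static_assertions::assert_not_impl_any!(u16: Unaligned); };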
7843
7844 // NOTE: The negative impl assertions here are not necessarily
7845 // prescriptive. They merely serve as change detectors to make sure
7846 // we're aware of what trait impls are getting added with a given
7847 // change. Of course, some impls would be invalid (e.g., `bool:
7848 // FromBytes`), and so this change detection is very important.
7849
7850 assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7851 assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7852 assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7853 assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7854 assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7855 assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7856 assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7857 assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7858 assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7859 assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7860 assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7861 assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7862 assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7863 assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7864 assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7865
7866 assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7867 assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned);
7868 assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7869
7870 assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7871 assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7872 assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7873 assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7874 assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7875 assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7876 assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7877 assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7878 assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7879 assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7880 assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7881 assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7882
7883 assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7884 assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7885 assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7886 assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7887 assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7888 assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7889 assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7890 assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7891 assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7892 assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7893 assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7894 assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7895
7896 // Implements none of the ZC traits.
7897 struct NotZerocopy;
7898
7899 #[rustfmt::skip]
7900 type FnManyArgs = fn(
7901 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7902 ) -> (NotZerocopy, NotZerocopy);
7903
7904 // Allowed, because we're not actually using this type for FFI.
7905 #[allow(improper_ctypes_definitions)]
7906 #[rustfmt::skip]
7907 type ECFnManyArgs = extern "C" fn(
7908 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7909 ) -> (NotZerocopy, NotZerocopy);
7910
7911 #[cfg(feature = "alloc")]
7912 assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7913 assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7914 assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7915 assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7916 assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7917 assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7918 assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7919 assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7920 assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7921 assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7922 assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7923 assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7924
7925 assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7926 assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7927
7928 assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7929 assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7930 assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7931 assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7932
7933 assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes);
7934 assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7935
7936 assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7937 assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7938
7939 assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7940 assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes);
7941
7942 assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7943 assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7944 assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7945 assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7946 assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7947 assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7948 assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7949
7950 assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7951 assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7952 assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7953 assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7954 assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7955 assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7956
7957 #[cfg(feature = "simd")]
7958 {
7959 #[allow(unused_macros)]
7960 macro_rules! test_simd_arch_mod {
7961 ($arch:ident, $($typ:ident),*) => {
7962 {
7963 use core::arch::$arch::{$($typ),*};
7964 use crate::*;
7965 $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )*
7966 }
7967 };
7968 }
7969 #[cfg(target_arch = "x86")]
7970 test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7971
7972 #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
7973 test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
7974
7975 #[cfg(target_arch = "x86_64")]
7976 test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7977
7978 #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
7979 test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
7980
7981 #[cfg(target_arch = "wasm32")]
7982 test_simd_arch_mod!(wasm32, v128);
7983
7984 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
7985 test_simd_arch_mod!(
7986 powerpc,
7987 vector_bool_long,
7988 vector_double,
7989 vector_signed_long,
7990 vector_unsigned_long
7991 );
7992
7993 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
7994 test_simd_arch_mod!(
7995 powerpc64,
7996 vector_bool_long,
7997 vector_double,
7998 vector_signed_long,
7999 vector_unsigned_long
8000 );
8001 #[cfg(target_arch = "aarch64")]
8002 #[rustfmt::skip]
8003 test_simd_arch_mod!(
8004 aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
8005 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
8006 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
8007 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
8008 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
8009 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
8010 uint64x1_t, uint64x2_t
8011 );
8012 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
8013 #[rustfmt::skip]
8014 test_simd_arch_mod!(arm, int8x4_t, uint8x4_t);
8015 }
8016 }
8017}
8018
8019#[cfg(kani)]
8020mod proofs {
8021 use super::*;
8022
8023 impl kani::Arbitrary for DstLayout {
8024 fn any() -> Self {
8025 let align: NonZeroUsize = kani::any();
8026 let size_info: SizeInfo = kani::any();
8027
8028 kani::assume(align.is_power_of_two());
8029 kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);
8030
            // For testing purposes, we care most about instantiations of
8032 // `DstLayout` that can correspond to actual Rust types. We use
8033 // `Layout` to verify that our `DstLayout` satisfies the validity
8034 // conditions of Rust layouts.
8035 kani::assume(
8036 match size_info {
8037 SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()),
8038 SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                        // `SliceDst` cannot encode an exact size, but we know
                        // it is at least `_offset` bytes.
8041 Layout::from_size_align(_offset, align.get())
8042 }
8043 }
8044 .is_ok(),
8045 );
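            // For example (illustrative): `align = 8` paired with
            // `SizeInfo::Sized { _size: usize::MAX }` is rejected here, since
            // `Layout::from_size_align` fails when `size`, rounded up to the
            // nearest multiple of `align`, would overflow `isize`.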
8046
            Self { align, size_info }
8048 }
8049 }
8050
8051 impl kani::Arbitrary for SizeInfo {
8052 fn any() -> Self {
8053 let is_sized: bool = kani::any();
8054
8055 match is_sized {
8056 true => {
8057 let size: usize = kani::any();
8058
8059 kani::assume(size <= isize::MAX as _);
8060
8061 SizeInfo::Sized { _size: size }
8062 }
8063 false => SizeInfo::SliceDst(kani::any()),
8064 }
8065 }
8066 }
8067
8068 impl kani::Arbitrary for TrailingSliceLayout {
8069 fn any() -> Self {
8070 let elem_size: usize = kani::any();
8071 let offset: usize = kani::any();
8072
8073 kani::assume(elem_size < isize::MAX as _);
8074 kani::assume(offset < isize::MAX as _);
8075
8076 TrailingSliceLayout { _elem_size: elem_size, _offset: offset }
8077 }
8078 }
8079
8080 #[kani::proof]
8081 fn prove_dst_layout_extend() {
8082 use crate::util::{core_layout::padding_needed_for, max, min};
8083
8084 let base: DstLayout = kani::any();
8085 let field: DstLayout = kani::any();
8086 let packed: Option<NonZeroUsize> = kani::any();
8087
8088 if let Some(max_align) = packed {
8089 kani::assume(max_align.is_power_of_two());
8090 kani::assume(base.align <= max_align);
8091 }
8092
8093 // The base can only be extended if it's sized.
8094 kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
8095 let base_size = if let SizeInfo::Sized { _size: size } = base.size_info {
8096 size
8097 } else {
8098 unreachable!();
8099 };
8100
8101 // Under the above conditions, `DstLayout::extend` will not panic.
8102 let composite = base.extend(field, packed);
8103
8104 // The field's alignment is clamped by `max_align` (i.e., the
8105 // `packed` attribute, if any) [1].
8106 //
8107 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8108 //
8109 // The alignments of each field, for the purpose of positioning
8110 // fields, is the smaller of the specified alignment and the
8111 // alignment of the field's type.
8112 let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));
8113
8114 // The struct's alignment is the maximum of its previous alignment and
8115 // `field_align`.
8116 assert_eq!(composite.align, max(base.align, field_align));
8117
8118 // Compute the minimum amount of inter-field padding needed to
8119 // satisfy the field's alignment, and offset of the trailing field.
8120 // [1]
8121 //
8122 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8123 //
8124 // Inter-field padding is guaranteed to be the minimum required in
8125 // order to satisfy each field's (possibly altered) alignment.
8126 let padding = padding_needed_for(base_size, field_align);
8127 let offset = base_size + padding;
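
        // As a concrete illustration (not part of the proof): with
        // `base_size = 5` and `field.align = 8`, `packed = None` gives
        // `field_align = 8`, `padding = padding_needed_for(5, 8) = 3`, and
        // `offset = 8`; `packed = Some(2)` clamps `field_align` to 2, giving
        // `padding = 1` and `offset = 6`.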
8128
8129 // For testing purposes, we'll also construct `alloc::Layout`
8130 // stand-ins for `DstLayout`, and show that `extend` behaves
8131 // comparably on both types.
8132 let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();
8133
8134 match field.size_info {
8135 SizeInfo::Sized { _size: field_size } => {
8136 if let SizeInfo::Sized { _size: composite_size } = composite.size_info {
8137 // If the trailing field is sized, the resulting layout
8138 // will be sized. Its size will be the sum of the
                    // preceding layout, the size of the new field, and the
8140 // size of inter-field padding between the two.
8141 assert_eq!(composite_size, offset + field_size);
8142
8143 let field_analog =
8144 Layout::from_size_align(field_size, field_align.get()).unwrap();
8145
8146 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8147 {
8148 assert_eq!(actual_offset, offset);
8149 assert_eq!(actual_composite.size(), composite_size);
8150 assert_eq!(actual_composite.align(), composite.align.get());
8151 } else {
                        // An error here reflects that the composite of `base`
8153 // and `field` cannot correspond to a real Rust type
8154 // fragment, because such a fragment would violate
8155 // the basic invariants of a valid Rust layout. At
8156 // the time of writing, `DstLayout` is a little more
8157 // permissive than `Layout`, so we don't assert
8158 // anything in this branch (e.g., unreachability).
8159 }
8160 } else {
8161 panic!("The composite of two sized layouts must be sized.")
8162 }
8163 }
8164 SizeInfo::SliceDst(TrailingSliceLayout {
8165 _offset: field_offset,
8166 _elem_size: field_elem_size,
8167 }) => {
8168 if let SizeInfo::SliceDst(TrailingSliceLayout {
8169 _offset: composite_offset,
8170 _elem_size: composite_elem_size,
8171 }) = composite.size_info
8172 {
8173 // The offset of the trailing slice component is the sum
8174 // of the offset of the trailing field and the trailing
8175 // slice offset within that field.
8176 assert_eq!(composite_offset, offset + field_offset);
8177 // The elem size is unchanged.
8178 assert_eq!(composite_elem_size, field_elem_size);
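
                    // Illustrative: continuing the `offset = 8` example above,
                    // a trailing-DST field with `field_offset = 4` yields
                    // `composite_offset = 12`, while `field_elem_size` carries
                    // through unchanged.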
8179
8180 let field_analog =
8181 Layout::from_size_align(field_offset, field_align.get()).unwrap();
8182
8183 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8184 {
8185 assert_eq!(actual_offset, offset);
8186 assert_eq!(actual_composite.size(), composite_offset);
8187 assert_eq!(actual_composite.align(), composite.align.get());
8188 } else {
                        // An error here reflects that the composite of `base`
8190 // and `field` cannot correspond to a real Rust type
8191 // fragment, because such a fragment would violate
8192 // the basic invariants of a valid Rust layout. At
8193 // the time of writing, `DstLayout` is a little more
8194 // permissive than `Layout`, so we don't assert
8195 // anything in this branch (e.g., unreachability).
8196 }
8197 } else {
8198 panic!("The extension of a layout with a DST must result in a DST.")
8199 }
8200 }
8201 }
8202 }
8203
8204 #[kani::proof]
8205 #[kani::should_panic]
8206 fn prove_dst_layout_extend_dst_panics() {
8207 let base: DstLayout = kani::any();
8208 let field: DstLayout = kani::any();
8209 let packed: Option<NonZeroUsize> = kani::any();
8210
8211 if let Some(max_align) = packed {
8212 kani::assume(max_align.is_power_of_two());
8213 kani::assume(base.align <= max_align);
8214 }
8215
8216 kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));
8217
8218 let _ = base.extend(field, packed);
8219 }
8220
8221 #[kani::proof]
8222 fn prove_dst_layout_pad_to_align() {
8223 use crate::util::core_layout::padding_needed_for;
8224
8225 let layout: DstLayout = kani::any();
8226
8227 let padded: DstLayout = layout.pad_to_align();
8228
8229 // Calling `pad_to_align` does not alter the `DstLayout`'s alignment.
8230 assert_eq!(padded.align, layout.align);
8231
8232 if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info {
8233 if let SizeInfo::Sized { _size: padded_size } = padded.size_info {
                // If the layout is sized, it will remain sized after padding
                // is added. Its new size will be the sum of its unpadded size
                // and the trailing padding needed to satisfy its alignment
                // requirement.
8238 let padding = padding_needed_for(unpadded_size, layout.align);
8239 assert_eq!(padded_size, unpadded_size + padding);
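
                // As a concrete illustration (not part of the proof): with
                // `unpadded_size = 5` and `align = 4`,
                // `padding = padding_needed_for(5, 4) = 3`, so
                // `padded_size = 8`, matching `Layout::pad_to_align`.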
8240
8241 // Prove that calling `DstLayout::pad_to_align` behaves
8242 // identically to `Layout::pad_to_align`.
8243 let layout_analog =
8244 Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
8245 let padded_analog = layout_analog.pad_to_align();
8246 assert_eq!(padded_analog.align(), layout.align.get());
8247 assert_eq!(padded_analog.size(), padded_size);
8248 } else {
8249 panic!("The padding of a sized layout must result in a sized layout.")
8250 }
8251 } else {
8252 // If the layout is a DST, padding cannot be statically added.
8253 assert_eq!(padded.size_info, layout.size_info);
8254 }
8255 }
8256}
8257