| 1 | // Copyright 2018 The Fuchsia Authors |
| 2 | // |
| 3 | // Licensed under the 2-Clause BSD License <LICENSE-BSD or |
| 4 | // https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0 |
| 5 | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
| 6 | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
| 7 | // This file may not be copied, modified, or distributed except according to |
| 8 | // those terms. |
| 9 | |
| 10 | // After updating the following doc comment, make sure to run the following |
| 11 | // command to update `README.md` based on its contents: |
| 12 | // |
| 13 | // cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md |
| 14 | |
| 15 | //! *<span style="font-size: 100%; color:grey;">Need more out of zerocopy? |
| 16 | //! Submit a [customer request issue][customer-request-issue]!</span>* |
| 17 | //! |
| 18 | //! ***<span style="font-size: 140%">Fast, safe, <span |
| 19 | //! style="color:red;">compile error</span>. Pick two.</span>*** |
| 20 | //! |
| 21 | //! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe` |
| 22 | //! so you don't have to. |
| 23 | //! |
| 24 | //! *Thanks for using zerocopy 0.8! For an overview of what changes from 0.7, |
| 25 | //! check out our [release notes][release-notes], which include a step-by-step |
| 26 | //! guide for upgrading from 0.7.* |
| 27 | //! |
| 28 | //! *Have questions? Need help? Ask the maintainers on [GitHub][github-q-a] or |
| 29 | //! on [Discord][discord]!* |
| 30 | //! |
| 31 | //! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose |
| 32 | //! [release-notes]: https://github.com/google/zerocopy/discussions/1680 |
| 33 | //! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a |
| 34 | //! [discord]: https://discord.gg/MAvWH2R6zk |
| 35 | //! |
| 36 | //! # Overview |
| 37 | //! |
| 38 | //! ##### Conversion Traits |
| 39 | //! |
| 40 | //! Zerocopy provides four derivable traits for zero-cost conversions: |
| 41 | //! - [`TryFromBytes`] indicates that a type may safely be converted from |
| 42 | //! certain byte sequences (conditional on runtime checks) |
| 43 | //! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid |
| 44 | //! instance of a type |
| 45 | //! - [`FromBytes`] indicates that a type may safely be converted from an |
| 46 | //! arbitrary byte sequence |
| 47 | //! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte |
| 48 | //! sequence |
| 49 | //! |
| 50 | //! These traits support sized types, slices, and [slice DSTs][slice-dsts]. |
| 51 | //! |
| 52 | //! [slice-dsts]: KnownLayout#dynamically-sized-types |
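//!
//! For example, here is a minimal sketch of a round-trip conversion (the
//! `PacketHeader` type is purely illustrative, and the snippet assumes the
//! derives are available — via the `derive` feature or a direct
//! `zerocopy-derive` dependency — and the 0.8 method names `read_from_bytes`
//! and `as_bytes`):
//!
//! ```
//! use zerocopy::*;
//! # use zerocopy_derive::*;
//!
//! #[derive(Debug, FromBytes, IntoBytes, KnownLayout, Immutable)]
//! #[repr(C)]
//! struct PacketHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//!     length: [u8; 2],
//!     checksum: [u8; 2],
//! }
//!
//! // Parse a header from raw bytes; this fails if the slice is the wrong
//! // size.
//! let bytes = [0u8, 1, 2, 3, 4, 5, 6, 7];
//! let header = PacketHeader::read_from_bytes(&bytes[..]).unwrap();
//! assert_eq!(header.length, [4, 5]);
//!
//! // ...and view the parsed header as bytes again at zero cost.
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```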
| 53 | //! |
| 54 | //! ##### Marker Traits |
| 55 | //! |
| 56 | //! Zerocopy provides three derivable marker traits that do not provide any |
| 57 | //! functionality themselves, but are required to call certain methods provided |
| 58 | //! by the conversion traits: |
| 59 | //! - [`KnownLayout`] indicates that zerocopy can reason about certain layout |
| 60 | //! qualities of a type |
| 61 | //! - [`Immutable`] indicates that a type is free from interior mutability, |
| 62 | //! except by ownership or an exclusive (`&mut`) borrow |
| 63 | //! - [`Unaligned`] indicates that a type's alignment requirement is 1 |
| 64 | //! |
| 65 | //! You should generally derive these marker traits whenever possible. |
| 66 | //! |
| 67 | //! ##### Conversion Macros |
| 68 | //! |
| 69 | //! Zerocopy provides six macros for safe casting between types: |
| 70 | //! |
| 71 | //! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of |
| 72 | //! one type to a value of another type of the same size |
| 73 | //! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a |
| 74 | //! mutable reference of one type to a mutable reference of another type of |
| 75 | //! the same size |
| 76 | //! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a |
| 77 | //! mutable or immutable reference of one type to an immutable reference of |
| 78 | //! another type of the same size |
| 79 | //! |
| 80 | //! These macros perform *compile-time* size and alignment checks, meaning that |
| 81 | //! unconditional casts have zero cost at runtime. Conditional casts do not need |
| 82 | //! to validate size or alignment at runtime, but do need to validate contents. |
| 83 | //! |
| 84 | //! These macros cannot be used in generic contexts. For generic conversions, |
| 85 | //! use the methods defined by the [conversion traits](#conversion-traits). |
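//!
//! For example, a sketch of [`transmute`] performing an unconditional,
//! compile-time-checked cast (the array types are purely illustrative):
//!
//! ```
//! use zerocopy::transmute;
//!
//! let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
//!
//! // The size check happens at compile time, so this cast is free at runtime.
//! let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
//!
//! assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
//! ```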
| 86 | //! |
| 87 | //! ##### Byteorder-Aware Numerics |
| 88 | //! |
| 89 | //! Zerocopy provides byte-order aware integer types that support these |
| 90 | //! conversions; see the [`byteorder`] module. These types are especially useful |
| 91 | //! for network parsing. |
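//!
//! For example, a sketch of a big-endian field (the value and types are
//! illustrative):
//!
//! ```
//! use zerocopy::byteorder::{BigEndian, U16};
//! use zerocopy::IntoBytes;
//!
//! // `U16<BigEndian>` stores its bytes in network order regardless of the
//! // host's endianness, so it can be embedded directly in a packet struct.
//! let length = U16::<BigEndian>::new(512);
//! assert_eq!(length.get(), 512);
//! assert_eq!(length.as_bytes(), &[2u8, 0]);
//! ```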
| 92 | //! |
| 93 | //! # Cargo Features |
| 94 | //! |
| 95 | //! - **`alloc`** |
| 96 | //! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled, |
| 97 | //! the `alloc` crate is added as a dependency, and some allocation-related |
| 98 | //! functionality is added. |
| 99 | //! |
| 100 | //! - **`std`** |
| 101 | //! By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the |
| 102 | //! `std` crate is added as a dependency (ie, `no_std` is disabled), and |
| 103 | //! support for some `std` types is added. `std` implies `alloc`. |
| 104 | //! |
| 105 | //! - **`derive`** |
| 106 | //! Provides derives for the core marker traits via the `zerocopy-derive` |
| 107 | //! crate. These derives are re-exported from `zerocopy`, so it is not |
| 108 | //! necessary to depend on `zerocopy-derive` directly. |
| 109 | //! |
| 110 | //! However, you may experience better compile times if you instead directly |
| 111 | //! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`, |
| 112 | //! since doing so will allow Rust to compile these crates in parallel. To do |
| 113 | //! so, do *not* enable the `derive` feature, and list both dependencies in |
| 114 | //! your `Cargo.toml` with the same leading non-zero version number; e.g.: |
| 115 | //! |
| 116 | //! ```toml |
| 117 | //! [dependencies] |
| 118 | //! zerocopy = "0.X" |
| 119 | //! zerocopy-derive = "0.X" |
| 120 | //! ``` |
| 121 | //! |
| 122 | //! To avoid the risk of [duplicate import errors][duplicate-import-errors] if |
| 123 | //! one of your dependencies enables zerocopy's `derive` feature, import |
| 124 | //! derives as `use zerocopy_derive::*` rather than by name (e.g., `use |
| 125 | //! zerocopy_derive::FromBytes`). |
| 126 | //! |
| 127 | //! - **`simd`** |
| 128 | //! When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and |
| 129 | //! `IntoBytes` impls are emitted for all stable SIMD types which exist on the |
| 130 | //! target platform. Note that the layout of SIMD types is not yet stabilized, |
| 131 | //! so these impls may be removed in the future if layout changes make them |
| 132 | //! invalid. For more information, see the Unsafe Code Guidelines Reference |
| 133 | //! page on the [layout of packed SIMD vectors][simd-layout]. |
| 134 | //! |
| 135 | //! - **`simd-nightly`** |
| 136 | //! Enables the `simd` feature and adds support for SIMD types which are only |
| 137 | //! available on nightly. Since these types are unstable, support for any type |
| 138 | //! may be removed at any point in the future. |
| 139 | //! |
| 140 | //! - **`float-nightly`** |
| 141 | //! Adds support for the unstable `f16` and `f128` types. These types are |
| 142 | //! not yet fully implemented and may not be supported on all platforms. |
| 143 | //! |
| 144 | //! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587 |
| 145 | //! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html |
| 146 | //! |
| 147 | //! # Security Ethos |
| 148 | //! |
| 149 | //! Zerocopy is expressly designed for use in security-critical contexts. We |
| 150 | //! strive to ensure that zerocopy code is sound under Rust's current |
| 151 | //! memory model, and *any future memory model*. We ensure this by: |
| 152 | //! - **...not 'guessing' about Rust's semantics.** |
| 153 | //! We annotate `unsafe` code with a precise rationale for its soundness that |
| 154 | //! cites a relevant section of Rust's official documentation. When Rust's |
| 155 | //! documented semantics are unclear, we work with the Rust Operational |
| 156 | //! Semantics Team to clarify Rust's documentation. |
| 157 | //! - **...rigorously testing our implementation.** |
| 158 | //! We run tests using [Miri], ensuring that zerocopy is sound across a wide |
| 159 | //! array of supported target platforms of varying endianness and pointer |
| 160 | //! width, and across both current and experimental memory models of Rust. |
| 161 | //! - **...formally proving the correctness of our implementation.** |
| 162 | //! We apply formal verification tools like [Kani][kani] to prove zerocopy's |
| 163 | //! correctness. |
| 164 | //! |
| 165 | //! For more information, see our full [soundness policy]. |
| 166 | //! |
| 167 | //! [Miri]: https://github.com/rust-lang/miri |
| 168 | //! [Kani]: https://github.com/model-checking/kani |
| 169 | //! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness |
| 170 | //! |
| 171 | //! # Relationship to Project Safe Transmute |
| 172 | //! |
| 173 | //! [Project Safe Transmute] is an official initiative of the Rust Project to |
| 174 | //! develop language-level support for safer transmutation. The Project consults |
| 175 | //! with crates like zerocopy to identify aspects of safer transmutation that |
| 176 | //! would benefit from compiler support, and has developed an [experimental, |
| 177 | //! compiler-supported analysis][mcp-transmutability] which determines whether, |
| 178 | //! for a given type, any value of that type may be soundly transmuted into |
| 179 | //! another type. Once this functionality is sufficiently mature, zerocopy |
| 180 | //! intends to replace its internal transmutability analysis (implemented by our |
| 181 | //! custom derives) with the compiler-supported one. This change will likely be |
| 182 | //! an implementation detail that is invisible to zerocopy's users. |
| 183 | //! |
| 184 | //! Project Safe Transmute will not replace the need for most of zerocopy's |
| 185 | //! higher-level abstractions. The experimental compiler analysis is a tool for |
| 186 | //! checking the soundness of `unsafe` code, not a tool to avoid writing |
| 187 | //! `unsafe` code altogether. For the foreseeable future, crates like zerocopy |
| 188 | //! will still be required in order to provide higher-level abstractions on top |
| 189 | //! of the building block provided by Project Safe Transmute. |
| 190 | //! |
| 191 | //! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html |
| 192 | //! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411 |
| 193 | //! |
| 194 | //! # MSRV |
| 195 | //! |
| 196 | //! See our [MSRV policy]. |
| 197 | //! |
| 198 | //! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv |
| 199 | //! |
| 200 | //! # Changelog |
| 201 | //! |
| 202 | //! Zerocopy uses [GitHub Releases]. |
| 203 | //! |
| 204 | //! [GitHub Releases]: https://github.com/google/zerocopy/releases |
| 205 | |
| 206 | // Sometimes we want to use lints which were added after our MSRV. |
| 207 | // `unknown_lints` is `warn` by default and we deny warnings in CI, so without |
| 208 | // this attribute, any unknown lint would cause a CI failure when testing with |
| 209 | // our MSRV. |
| 210 | #![allow (unknown_lints, non_local_definitions, unreachable_patterns)] |
| 211 | #![deny (renamed_and_removed_lints)] |
| 212 | #![deny ( |
| 213 | anonymous_parameters, |
| 214 | deprecated_in_future, |
| 215 | late_bound_lifetime_arguments, |
| 216 | missing_copy_implementations, |
| 217 | missing_debug_implementations, |
| 218 | missing_docs, |
| 219 | path_statements, |
| 220 | patterns_in_fns_without_body, |
| 221 | rust_2018_idioms, |
| 222 | trivial_numeric_casts, |
| 223 | unreachable_pub, |
| 224 | unsafe_op_in_unsafe_fn, |
| 225 | unused_extern_crates, |
| 226 | // We intentionally choose not to deny `unused_qualifications`. When items |
| 227 | // are added to the prelude (e.g., `core::mem::size_of`), this has the |
| 228 | // consequence of making some uses trigger this lint on the latest toolchain |
| 229 | // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`) |
| 230 | // does not work on older toolchains. |
| 231 | // |
| 232 | // We tested a more complicated fix in #1413, but ultimately decided that, |
| 233 | // since this lint is just a minor style lint, the complexity isn't worth it |
| 234 | // - it's fine to occasionally have unused qualifications slip through, |
| 235 | // especially since these do not affect our user-facing API in any way. |
| 236 | variant_size_differences |
| 237 | )] |
| 238 | #![cfg_attr ( |
| 239 | __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, |
| 240 | deny(fuzzy_provenance_casts, lossy_provenance_casts) |
| 241 | )] |
| 242 | #![deny ( |
| 243 | clippy::all, |
| 244 | clippy::alloc_instead_of_core, |
| 245 | clippy::arithmetic_side_effects, |
| 246 | clippy::as_underscore, |
| 247 | clippy::assertions_on_result_states, |
| 248 | clippy::as_conversions, |
| 249 | clippy::correctness, |
| 250 | clippy::dbg_macro, |
| 251 | clippy::decimal_literal_representation, |
| 252 | clippy::double_must_use, |
| 253 | clippy::get_unwrap, |
| 254 | clippy::indexing_slicing, |
| 255 | clippy::missing_inline_in_public_items, |
| 256 | clippy::missing_safety_doc, |
| 257 | clippy::must_use_candidate, |
| 258 | clippy::must_use_unit, |
| 259 | clippy::obfuscated_if_else, |
| 260 | clippy::perf, |
| 261 | clippy::print_stdout, |
| 262 | clippy::return_self_not_must_use, |
| 263 | clippy::std_instead_of_core, |
| 264 | clippy::style, |
| 265 | clippy::suspicious, |
| 266 | clippy::todo, |
| 267 | clippy::undocumented_unsafe_blocks, |
| 268 | clippy::unimplemented, |
| 269 | clippy::unnested_or_patterns, |
| 270 | clippy::unwrap_used, |
| 271 | clippy::use_debug |
| 272 | )] |
| 273 | #![allow (clippy::type_complexity)] |
| 274 | #![deny ( |
| 275 | rustdoc::bare_urls, |
| 276 | rustdoc::broken_intra_doc_links, |
| 277 | rustdoc::invalid_codeblock_attributes, |
| 278 | rustdoc::invalid_html_tags, |
| 279 | rustdoc::invalid_rust_codeblocks, |
| 280 | rustdoc::missing_crate_level_docs, |
| 281 | rustdoc::private_intra_doc_links |
| 282 | )] |
| 283 | // In test code, it makes sense to weight more heavily towards concise, readable |
| 284 | // code over correct or debuggable code. |
| 285 | #![cfg_attr (any(test, kani), allow( |
| 286 | // In tests, you get line numbers and have access to source code, so panic |
| 287 | // messages are less important. You also often unwrap a lot, which would |
| 288 | // make expect'ing instead very verbose. |
| 289 | clippy::unwrap_used, |
| 290 | // In tests, there's no harm to "panic risks" - the worst that can happen is |
| 291 | // that your test will fail, and you'll fix it. By contrast, panic risks in |
| 292 | // production code introduce the possibility of code panicking unexpectedly "in |
| 293 | // the field". |
| 294 | clippy::arithmetic_side_effects, |
| 295 | clippy::indexing_slicing, |
| 296 | ))] |
| 297 | #![cfg_attr (not(any(test, feature = "std" )), no_std)] |
| 298 | #![cfg_attr ( |
| 299 | all(feature = "simd-nightly" , any(target_arch = "x86" , target_arch = "x86_64" )), |
| 300 | feature(stdarch_x86_avx512) |
| 301 | )] |
| 302 | #![cfg_attr ( |
| 303 | all(feature = "simd-nightly" , target_arch = "arm" ), |
| 304 | feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics) |
| 305 | )] |
| 306 | #![cfg_attr ( |
| 307 | all(feature = "simd-nightly" , any(target_arch = "powerpc" , target_arch = "powerpc64" )), |
| 308 | feature(stdarch_powerpc) |
| 309 | )] |
| 310 | #![cfg_attr (feature = "float-nightly" , feature(f16, f128))] |
| 311 | #![cfg_attr (doc_cfg, feature(doc_cfg))] |
| 312 | #![cfg_attr ( |
| 313 | __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, |
| 314 | feature(layout_for_ptr, coverage_attribute) |
| 315 | )] |
| 316 | |
| 317 | // This is a hack to allow zerocopy-derive derives to work in this crate. They |
| 318 | // assume that zerocopy is linked as an extern crate, so they access items from |
| 319 | // it as `zerocopy::Xxx`. This makes that still work. |
| 320 | #[cfg (any(feature = "derive" , test))] |
| 321 | extern crate self as zerocopy; |
| 322 | |
| 323 | #[doc (hidden)] |
| 324 | #[macro_use ] |
| 325 | pub mod util; |
| 326 | |
| 327 | pub mod byte_slice; |
| 328 | pub mod byteorder; |
| 329 | mod deprecated; |
| 330 | // This module is `pub` so that zerocopy's error types and error handling |
| 331 | // documentation is grouped together in a cohesive module. In practice, we |
| 332 | // expect most users to use the re-export of `error`'s items to avoid identifier |
| 333 | // stuttering. |
| 334 | pub mod error; |
| 335 | mod impls; |
| 336 | #[doc (hidden)] |
| 337 | pub mod layout; |
| 338 | mod macros; |
| 339 | #[doc (hidden)] |
| 340 | pub mod pointer; |
| 341 | mod r#ref; |
| 342 | // TODO(#252): If we make this pub, come up with a better name. |
| 343 | mod wrappers; |
| 344 | |
| 345 | pub use crate::byte_slice::*; |
| 346 | pub use crate::byteorder::*; |
| 347 | pub use crate::error::*; |
| 348 | pub use crate::r#ref::*; |
| 349 | pub use crate::wrappers::*; |
| 350 | |
| 351 | use core::{ |
| 352 | cell::UnsafeCell, |
| 353 | cmp::Ordering, |
| 354 | fmt::{self, Debug, Display, Formatter}, |
| 355 | hash::Hasher, |
| 356 | marker::PhantomData, |
| 357 | mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit}, |
| 358 | num::{ |
| 359 | NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, |
| 360 | NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping, |
| 361 | }, |
| 362 | ops::{Deref, DerefMut}, |
| 363 | ptr::{self, NonNull}, |
| 364 | slice, |
| 365 | }; |
| 366 | |
| 367 | #[cfg (feature = "std" )] |
| 368 | use std::io; |
| 369 | |
| 370 | use crate::pointer::{invariant, BecauseExclusive}; |
| 371 | |
| 372 | #[cfg (any(feature = "alloc" , test))] |
| 373 | extern crate alloc; |
| 374 | #[cfg (any(feature = "alloc" , test))] |
| 375 | use alloc::{boxed::Box, vec::Vec}; |
| 376 | |
| 377 | #[cfg (any(feature = "alloc" , test, kani))] |
| 378 | use core::alloc::Layout; |
| 379 | |
| 380 | // Used by `TryFromBytes::is_bit_valid`. |
| 381 | #[doc (hidden)] |
| 382 | pub use crate::pointer::{BecauseImmutable, Maybe, MaybeAligned, Ptr}; |
| 383 | // Used by `KnownLayout`. |
| 384 | #[doc (hidden)] |
| 385 | pub use crate::layout::*; |
| 386 | |
| 387 | // For each trait polyfill, as soon as the corresponding feature is stable, the |
| 388 | // polyfill import will be unused because method/function resolution will prefer |
| 389 | // the inherent method/function over a trait method/function. Thus, we suppress |
| 390 | // the `unused_imports` warning. |
| 391 | // |
| 392 | // See the documentation on `util::polyfills` for more information. |
| 393 | #[allow (unused_imports)] |
| 394 | use crate::util::polyfills::{self, NonNullExt as _, NumExt as _}; |
| 395 | |
| 396 | #[rustversion::nightly] |
| 397 | #[cfg (all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))] |
| 398 | const _: () = { |
| 399 | #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""] |
| 400 | const _WARNING: () = (); |
| 401 | #[warn (deprecated)] |
| 402 | _WARNING |
| 403 | }; |
| 404 | |
| 405 | // These exist so that code which was written against the old names will get |
| 406 | // less confusing error messages when they upgrade to a more recent version of |
| 407 | // zerocopy. On our MSRV toolchain, the error messages read, for example: |
| 408 | // |
| 409 | // error[E0603]: trait `FromZeroes` is private |
| 410 | // --> examples/deprecated.rs:1:15 |
| 411 | // | |
| 412 | // 1 | use zerocopy::FromZeroes; |
| 413 | // | ^^^^^^^^^^ private trait |
| 414 | // | |
| 415 | // note: the trait `FromZeroes` is defined here |
| 416 | // --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5 |
| 417 | // | |
| 418 | // 1845 | use FromZeros as FromZeroes; |
| 419 | // | ^^^^^^^^^^^^^^^^^^^^^^^ |
| 420 | // |
| 421 | // The "note" provides enough context to make it easy to figure out how to fix |
| 422 | // the error. |
| 423 | #[allow (unused)] |
| 424 | use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified}; |
| 425 | |
| 426 | /// Implements [`KnownLayout`]. |
| 427 | /// |
| 428 | /// This derive analyzes various aspects of a type's layout that are needed for |
| 429 | /// some of zerocopy's APIs. It can be applied to structs, enums, and unions; |
| 430 | /// e.g.: |
| 431 | /// |
| 432 | /// ``` |
| 433 | /// # use zerocopy_derive::KnownLayout; |
| 434 | /// #[derive(KnownLayout)] |
| 435 | /// struct MyStruct { |
| 436 | /// # /* |
| 437 | /// ... |
| 438 | /// # */ |
| 439 | /// } |
| 440 | /// |
| 441 | /// #[derive(KnownLayout)] |
| 442 | /// enum MyEnum { |
| 443 | /// # V00, |
| 444 | /// # /* |
| 445 | /// ... |
| 446 | /// # */ |
| 447 | /// } |
| 448 | /// |
| 449 | /// #[derive(KnownLayout)] |
| 450 | /// union MyUnion { |
| 451 | /// # variant: u8, |
| 452 | /// # /* |
| 453 | /// ... |
| 454 | /// # */ |
| 455 | /// } |
| 456 | /// ``` |
| 457 | /// |
| 458 | /// # Limitations |
| 459 | /// |
| 460 | /// This derive cannot currently be applied to unsized structs without an |
| 461 | /// explicit `repr` attribute. |
| 462 | /// |
| 463 | /// Some invocations of this derive run afoul of a [known bug] in Rust's type |
| 464 | /// privacy checker. For example, this code: |
| 465 | /// |
| 466 | /// ```compile_fail,E0446 |
| 467 | /// use zerocopy::*; |
| 468 | /// # use zerocopy_derive::*; |
| 469 | /// |
| 470 | /// #[derive(KnownLayout)] |
| 471 | /// #[repr(C)] |
| 472 | /// pub struct PublicType { |
| 473 | /// leading: Foo, |
| 474 | /// trailing: Bar, |
| 475 | /// } |
| 476 | /// |
| 477 | /// #[derive(KnownLayout)] |
| 478 | /// struct Foo; |
| 479 | /// |
| 480 | /// #[derive(KnownLayout)] |
| 481 | /// struct Bar; |
| 482 | /// ``` |
| 483 | /// |
| 484 | /// ...results in a compilation error: |
| 485 | /// |
| 486 | /// ```text |
| 487 | /// error[E0446]: private type `Bar` in public interface |
| 488 | /// --> examples/bug.rs:3:10 |
| 489 | /// | |
| 490 | /// 3 | #[derive(KnownLayout)] |
| 491 | /// | ^^^^^^^^^^^ can't leak private type |
| 492 | /// ... |
| 493 | /// 14 | struct Bar; |
| 494 | /// | ---------- `Bar` declared as private |
| 495 | /// | |
| 496 | /// = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) |
| 497 | /// ``` |
| 498 | /// |
| 499 | /// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)` |
| 500 | /// structs whose trailing field type is less public than the enclosing struct. |
| 501 | /// |
| 502 | /// To work around this, mark the trailing field type `pub` and annotate it with |
| 503 | /// `#[doc(hidden)]`; e.g.: |
| 504 | /// |
| 505 | /// ```no_run |
| 506 | /// use zerocopy::*; |
| 507 | /// # use zerocopy_derive::*; |
| 508 | /// |
| 509 | /// #[derive(KnownLayout)] |
| 510 | /// #[repr(C)] |
| 511 | /// pub struct PublicType { |
| 512 | /// leading: Foo, |
| 513 | /// trailing: Bar, |
| 514 | /// } |
| 515 | /// |
| 516 | /// #[derive(KnownLayout)] |
| 517 | /// struct Foo; |
| 518 | /// |
| 519 | /// #[doc(hidden)] |
| 520 | /// #[derive(KnownLayout)] |
| 521 | /// pub struct Bar; // <- `Bar` is now also `pub` |
| 522 | /// ``` |
| 523 | /// |
| 524 | /// [known bug]: https://github.com/rust-lang/rust/issues/45713 |
| 525 | #[cfg (any(feature = "derive" , test))] |
| 526 | #[cfg_attr (doc_cfg, doc(cfg(feature = "derive" )))] |
| 527 | pub use zerocopy_derive::KnownLayout; |
| 528 | |
| 529 | /// Indicates that zerocopy can reason about certain aspects of a type's layout. |
| 530 | /// |
| 531 | /// This trait is required by many of zerocopy's APIs. It supports sized types, |
| 532 | /// slices, and [slice DSTs](#dynamically-sized-types). |
| 533 | /// |
| 534 | /// # Implementation |
| 535 | /// |
| 536 | /// **Do not implement this trait yourself!** Instead, use |
| 537 | /// [`#[derive(KnownLayout)]`][derive]; e.g.: |
| 538 | /// |
| 539 | /// ``` |
| 540 | /// # use zerocopy_derive::KnownLayout; |
| 541 | /// #[derive(KnownLayout)] |
| 542 | /// struct MyStruct { |
| 543 | /// # /* |
| 544 | /// ... |
| 545 | /// # */ |
| 546 | /// } |
| 547 | /// |
| 548 | /// #[derive(KnownLayout)] |
| 549 | /// enum MyEnum { |
| 550 | /// # /* |
| 551 | /// ... |
| 552 | /// # */ |
| 553 | /// } |
| 554 | /// |
| 555 | /// #[derive(KnownLayout)] |
| 556 | /// union MyUnion { |
| 557 | /// # variant: u8, |
| 558 | /// # /* |
| 559 | /// ... |
| 560 | /// # */ |
| 561 | /// } |
| 562 | /// ``` |
| 563 | /// |
| 564 | /// This derive performs a sophisticated analysis to deduce the layout |
| 565 | /// characteristics of types. You **must** implement this trait via the derive. |
| 566 | /// |
| 567 | /// # Dynamically-sized types |
| 568 | /// |
| 569 | /// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs"). |
| 570 | /// |
| 571 | /// A slice DST is a type whose trailing field is either a slice or another |
| 572 | /// slice DST, rather than a type with fixed size. For example: |
| 573 | /// |
| 574 | /// ``` |
| 575 | /// #[repr(C)] |
| 576 | /// struct PacketHeader { |
| 577 | /// # /* |
| 578 | /// ... |
| 579 | /// # */ |
| 580 | /// } |
| 581 | /// |
| 582 | /// #[repr(C)] |
| 583 | /// struct Packet { |
| 584 | /// header: PacketHeader, |
| 585 | /// body: [u8], |
| 586 | /// } |
| 587 | /// ``` |
| 588 | /// |
| 589 | /// It can be useful to think of slice DSTs as a generalization of slices - in |
| 590 | /// other words, a normal slice is just the special case of a slice DST with |
| 591 | /// zero leading fields. In particular: |
| 592 | /// - Like slices, slice DSTs can have different lengths at runtime |
| 593 | /// - Like slices, slice DSTs cannot be passed by-value, but only by reference |
| 594 | /// or via other indirection such as `Box` |
| 595 | /// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST |
| 596 | /// encodes the number of elements in the trailing slice field |
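///
/// For example, a sketch of the last point (the `Floats` type is illustrative
/// and is not part of this crate):
///
/// ```
/// #[repr(C)]
/// struct Floats {
///     count: u8,
///     values: [f32],
/// }
///
/// // A `&Floats` is a "wide" reference: alongside the address, it stores the
/// // number of elements in the trailing `values` slice, just like `&[f32]`.
/// assert_eq!(core::mem::size_of::<&Floats>(), core::mem::size_of::<&[f32]>());
/// ```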
| 597 | /// |
| 598 | /// ## Slice DST layout |
| 599 | /// |
| 600 | /// Just like other composite Rust types, the layout of a slice DST is not |
| 601 | /// well-defined unless it is specified using an explicit `#[repr(...)]` |
| 602 | /// attribute such as `#[repr(C)]`. [Other representations are |
| 603 | /// supported][reprs], but in this section, we'll use `#[repr(C)]` as our |
| 604 | /// example. |
| 605 | /// |
| 606 | /// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]` |
| 607 | /// types][repr-c-structs], but the presence of a variable-length field |
| 608 | /// introduces the possibility of *dynamic padding*. In particular, it may be |
| 609 | /// necessary to add trailing padding *after* the trailing slice field in order |
| 610 | /// to satisfy the outer type's alignment, and the amount of padding required |
| 611 | /// may be a function of the length of the trailing slice field. This is just a |
| 612 | /// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs, |
| 613 | /// but it can result in surprising behavior. For example, consider the |
| 614 | /// following type: |
| 615 | /// |
| 616 | /// ``` |
| 617 | /// #[repr(C)] |
| 618 | /// struct Foo { |
| 619 | /// a: u32, |
| 620 | /// b: u8, |
| 621 | /// z: [u16], |
| 622 | /// } |
| 623 | /// ``` |
| 624 | /// |
| 625 | /// Assuming that `u32` has alignment 4 (this is not true on all platforms), |
| 626 | /// then `Foo` has alignment 4 as well. Here is the smallest possible value for |
| 627 | /// `Foo`: |
| 628 | /// |
| 629 | /// ```text |
| 630 | /// byte offset | 01234567 |
| 631 | /// field | aaaab--- |
| 632 | /// >< |
| 633 | /// ``` |
| 634 | /// |
| 635 | /// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset |
| 636 | /// that we can place `z` at is 5, but since `z` has alignment 2, we need to |
| 637 | /// round up to offset 6. This means that there is one byte of padding between |
| 638 | /// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and |
| 639 | /// then two bytes of padding after `z` in order to satisfy the overall |
| 640 | /// alignment of `Foo`. The size of this instance is 8 bytes. |
| 641 | /// |
| 642 | /// What about if `z` has length 1? |
| 643 | /// |
| 644 | /// ```text |
| 645 | /// byte offset | 01234567 |
| 646 | /// field | aaaab-zz |
| 647 | /// ``` |
| 648 | /// |
| 649 | /// In this instance, `z` has length 1, and thus takes up 2 bytes. That means |
| 650 | /// that we no longer need padding after `z` in order to satisfy `Foo`'s |
| 651 | /// alignment. We've now seen two different values of `Foo` with two different |
| 652 | /// lengths of `z`, but they both have the same size - 8 bytes. |
| 653 | /// |
| 654 | /// What about if `z` has length 2? |
| 655 | /// |
| 656 | /// ```text |
| 657 | /// byte offset | 012345678901 |
| 658 | /// field | aaaab-zzzz-- |
| 659 | /// ``` |
| 660 | /// |
| 661 | /// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded |
| 662 | /// size to 10, and so we now need another 2 bytes of padding after `z` to |
| 663 | /// satisfy `Foo`'s alignment. |
| 664 | /// |
| 665 | /// Again, all of this is just a logical consequence of the `#[repr(C)]` rules |
| 666 | /// applied to slice DSTs, but it can be surprising that the amount of trailing |
| 667 | /// padding becomes a function of the trailing slice field's length, and thus |
| 668 | /// can only be computed at runtime. |
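///
/// A sketch of this arithmetic for `Foo` (again assuming
/// `align_of::<u32>() == 4`, so the slice offset is 6, the element size is 2,
/// and the alignment is 4):
///
/// ```
/// // Size of a `Foo` whose trailing slice `z` has `len` elements: the
/// // unpadded size is the slice's offset plus its length in bytes, rounded
/// // up to `Foo`'s alignment.
/// fn foo_size(len: usize) -> usize {
///     let unpadded = 6 + 2 * len;
///     unpadded + (4 - unpadded % 4) % 4
/// }
///
/// assert_eq!(foo_size(0), 8);
/// assert_eq!(foo_size(1), 8);
/// assert_eq!(foo_size(2), 12);
/// ```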
| 669 | /// |
| 670 | /// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations |
| 671 | /// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs |
| 672 | /// |
| 673 | /// ## What is a valid size? |
| 674 | /// |
| 675 | /// There are two places in zerocopy's API that we refer to "a valid size" of a |
| 676 | /// type. In normal casts or conversions, where the source is a byte slice, we |
| 677 | /// need to know whether the source byte slice is a valid size of the |
| 678 | /// destination type. In prefix or suffix casts, we need to know whether *there |
| 679 | /// exists* a valid size of the destination type which fits in the source byte |
| 680 | /// slice and, if so, what the largest such size is. |
| 681 | /// |
| 682 | /// As outlined above, a slice DST's size is defined by the number of elements |
| 683 | /// in its trailing slice field. However, there is not necessarily a 1-to-1 |
| 684 | /// mapping between trailing slice field length and overall size. As we saw in |
| 685 | /// the previous section with the type `Foo`, instances with both 0 and 1 |
| 686 | /// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes. |
| 687 | /// |
| 688 | /// When we say "x is a valid size of `T`", we mean one of two things: |
| 689 | /// - If `T: Sized`, then we mean that `x == size_of::<T>()` |
| 690 | /// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of |
| 691 | /// `T` with `len` trailing slice elements has size `x` |
| 692 | /// |
| 693 | /// When we say "largest possible size of `T` that fits in a byte slice", we |
| 694 | /// mean one of two things: |
| 695 | /// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least |
| 696 | /// `size_of::<T>()` bytes long |
| 697 | /// - If `T` is a slice DST, then we mean to consider all values, `len`, such |
| 698 | /// that the instance of `T` with `len` trailing slice elements fits in the |
| 699 | /// byte slice, and to choose the largest such `len`, if any |
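///
/// For example, a sketch of the second notion using `Foo` from above (same
/// layout assumptions as before: slice offset 6, element size 2, alignment 4):
///
/// ```
/// // The largest possible size of `Foo` that fits in `bytes` bytes, if any:
/// // try increasing values of `len` and keep the last size that still fits.
/// fn largest_foo_size_in(bytes: usize) -> Option<usize> {
///     (0..=bytes)
///         .map(|len| {
///             let unpadded = 6 + 2 * len;
///             unpadded + (4 - unpadded % 4) % 4
///         })
///         .take_while(|&size| size <= bytes)
///         .last()
/// }
///
/// // 8 (the `len == 1` instance) fits in an 11-byte slice, but 12 does not.
/// assert_eq!(largest_foo_size_in(11), Some(8));
/// // No valid size of `Foo` fits in fewer than 8 bytes.
/// assert_eq!(largest_foo_size_in(5), None);
/// ```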
| 700 | /// |
| 701 | /// |
| 702 | /// # Safety |
| 703 | /// |
| 704 | /// This trait does not convey any safety guarantees to code outside this crate. |
| 705 | /// |
| 706 | /// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future |
| 707 | /// releases of zerocopy may make backwards-breaking changes to these items, |
| 708 | /// including changes that only affect soundness, which may cause code which |
| 709 | /// uses those items to silently become unsound. |
| 710 | /// |
| 711 | #[cfg_attr (feature = "derive" , doc = "[derive]: zerocopy_derive::KnownLayout" )] |
| 712 | #[cfg_attr ( |
| 713 | not(feature = "derive" ), |
| 714 | doc = concat!("[derive]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.KnownLayout.html" ), |
| 715 | )] |
| 716 | #[cfg_attr ( |
| 717 | zerocopy_diagnostic_on_unimplemented_1_78_0, |
| 718 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`" ) |
| 719 | )] |
| 720 | pub unsafe trait KnownLayout { |
| 721 | // The `Self: Sized` bound makes it so that `KnownLayout` can still be |
| 722 | // object safe. It's not currently object safe thanks to `const LAYOUT`, and |
| 723 | // it likely won't be in the future, but there's no reason not to be |
| 724 | // forwards-compatible with object safety. |
| 725 | #[doc (hidden)] |
| 726 | fn only_derive_is_allowed_to_implement_this_trait() |
| 727 | where |
| 728 | Self: Sized; |
| 729 | |
| 730 | /// The type of metadata stored in a pointer to `Self`. |
| 731 | /// |
| 732 | /// This is `()` for sized types and `usize` for slice DSTs. |
| 733 | type PointerMetadata: PointerMetadata; |
| 734 | |
| 735 | /// A maybe-uninitialized analog of `Self` |
| 736 | /// |
| 737 | /// # Safety |
| 738 | /// |
| 739 | /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical. |
| 740 | /// `Self::MaybeUninit` admits uninitialized bytes in all positions. |
| 741 | #[doc (hidden)] |
| 742 | type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>; |
| 743 | |
| 744 | /// The layout of `Self`. |
| 745 | /// |
| 746 | /// # Safety |
| 747 | /// |
| 748 | /// Callers may assume that `LAYOUT` accurately reflects the layout of |
| 749 | /// `Self`. In particular: |
| 750 | /// - `LAYOUT.align` is equal to `Self`'s alignment |
| 751 | /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }` |
| 752 | /// where `size == size_of::<Self>()` |
| 753 | /// - If `Self` is a slice DST, then `LAYOUT.size_info == |
| 754 | /// SizeInfo::SliceDst(slice_layout)` where: |
| 755 | /// - The size, `size`, of an instance of `Self` with `elems` trailing |
| 756 | /// slice elements is equal to `slice_layout.offset + |
| 757 | /// slice_layout.elem_size * elems` rounded up to the nearest multiple |
| 758 | /// of `LAYOUT.align` |
| 759 | /// - For such an instance, any bytes in the range `[slice_layout.offset + |
| 760 | /// slice_layout.elem_size * elems, size)` are padding and must not be |
| 761 | /// assumed to be initialized |
| 762 | #[doc (hidden)] |
| 763 | const LAYOUT: DstLayout; |
| 764 | |
| 765 | /// SAFETY: The returned pointer has the same address and provenance as |
| 766 | /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems` |
| 767 | /// elements in its trailing slice. |
| 768 | #[doc (hidden)] |
| 769 | fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>; |
| 770 | |
| 771 | /// Extracts the metadata from a pointer to `Self`. |
| 772 | /// |
| 773 | /// # Safety |
| 774 | /// |
| 775 | /// `pointer_to_metadata` always returns the correct metadata stored in |
| 776 | /// `ptr`. |
| 777 | #[doc (hidden)] |
| 778 | fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata; |
| 779 | |
| 780 | /// Computes the length of the byte range addressed by `ptr`. |
| 781 | /// |
| 782 | /// Returns `None` if the resulting length would not fit in a `usize`. |
| 783 | /// |
| 784 | /// # Safety |
| 785 | /// |
| 786 | /// Callers may assume that `size_of_val_raw` always returns the correct |
| 787 | /// size. |
| 788 | /// |
| 789 | /// Callers may assume that, if `ptr` addresses a byte range whose length |
| 790 | /// fits in a `usize`, this will return `Some`. |
| 791 | #[doc (hidden)] |
| 792 | #[must_use ] |
| 793 | #[inline (always)] |
| 794 | fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> { |
| 795 | let meta = Self::pointer_to_metadata(ptr.as_ptr()); |
| 796 | // SAFETY: `size_for_metadata` promises to only return `None` if the |
| 797 | // resulting size would not fit in a `usize`. |
| 798 | meta.size_for_metadata(Self::LAYOUT) |
| 799 | } |
| 800 | } |
| 801 | |
| 802 | /// The metadata associated with a [`KnownLayout`] type. |
| 803 | #[doc (hidden)] |
| 804 | pub trait PointerMetadata: Copy + Eq + Debug { |
| 805 | /// Constructs a `Self` from an element count. |
| 806 | /// |
| 807 | /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns |
| 808 | /// `elems`. No other types are currently supported. |
| 809 | fn from_elem_count(elems: usize) -> Self; |
| 810 | |
| 811 | /// Computes the size of the object with the given layout and pointer |
| 812 | /// metadata. |
| 813 | /// |
| 814 | /// # Panics |
| 815 | /// |
| 816 | /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`, |
| 817 | /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may |
| 818 | /// panic. |
| 819 | /// |
| 820 | /// # Safety |
| 821 | /// |
| 822 | /// `size_for_metadata` promises to only return `None` if the resulting size |
| 823 | /// would not fit in a `usize`. |
| 824 | fn size_for_metadata(&self, layout: DstLayout) -> Option<usize>; |
| 825 | } |
| 826 | |
| 827 | impl PointerMetadata for () { |
| 828 | #[inline ] |
| 829 | #[allow (clippy::unused_unit)] |
| 830 | fn from_elem_count(_elems: usize) -> () {} |
| 831 | |
| 832 | #[inline ] |
| 833 | fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> { |
| 834 | match layout.size_info { |
| 835 | SizeInfo::Sized { size } => Some(size), |
| 836 | // NOTE: This branch is unreachable, but we return `None` rather |
| 837 | // than `unreachable!()` to avoid generating panic paths. |
| 838 | SizeInfo::SliceDst(_) => None, |
| 839 | } |
| 840 | } |
| 841 | } |
| 842 | |
| 843 | impl PointerMetadata for usize { |
| 844 | #[inline ] |
| 845 | fn from_elem_count(elems: usize) -> usize { |
| 846 | elems |
| 847 | } |
| 848 | |
| 849 | #[inline ] |
| 850 | fn size_for_metadata(&self, layout: DstLayout) -> Option<usize> { |
| 851 | match layout.size_info { |
| 852 | SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { |
| 853 | let slice_len = elem_size.checked_mul(*self)?; |
| 854 | let without_padding = offset.checked_add(slice_len)?; |
| 855 | without_padding.checked_add(util::padding_needed_for(without_padding, layout.align)) |
| 856 | } |
| 857 | // NOTE: This branch is unreachable, but we return `None` rather |
| 858 | // than `unreachable!()` to avoid generating panic paths. |
| 859 | SizeInfo::Sized { .. } => None, |
| 860 | } |
| 861 | } |
| 862 | } |
| 863 | |
| 864 | // SAFETY: Delegates safety to `DstLayout::for_slice`. |
| 865 | unsafe impl<T> KnownLayout for [T] { |
| 866 | #[allow (clippy::missing_inline_in_public_items)] |
| 867 | #[cfg_attr (coverage_nightly, coverage(off))] |
| 868 | fn only_derive_is_allowed_to_implement_this_trait() |
| 869 | where |
| 870 | Self: Sized, |
| 871 | { |
| 872 | } |
| 873 | |
| 874 | type PointerMetadata = usize; |
| 875 | |
| 876 | // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical |
| 877 | // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1]. |
| 878 | // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are |
| 879 | // identical, because they both lack a fixed-sized prefix and because they |
| 880 | // inherit the alignments of their inner element type (which are identical) |
| 881 | // [2][3]. |
| 882 | // |
| 883 | // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions |
| 884 | // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions |
| 885 | // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out |
| 886 | // back-to-back [2][3]. |
| 887 | // |
| 888 | // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1: |
| 889 | // |
| 890 | // `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as |
| 891 | // `T` |
| 892 | // |
| 893 | // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout: |
| 894 | // |
| 895 | // Slices have the same layout as the section of the array they slice. |
| 896 | // |
| 897 | // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout: |
| 898 | // |
| 899 | // An array of `[T; N]` has a size of `size_of::<T>() * N` and the same |
| 900 | // alignment of `T`. Arrays are laid out so that the zero-based `nth` |
| 901 | // element of the array is offset from the start of the array by `n * |
| 902 | // size_of::<T>()` bytes. |
| 903 | type MaybeUninit = [CoreMaybeUninit<T>]; |
| 904 | |
| 905 | const LAYOUT: DstLayout = DstLayout::for_slice::<T>(); |
| 906 | |
| 907 | // SAFETY: `.cast` preserves address and provenance. The returned pointer |
| 908 | // refers to an object with `elems` elements by construction. |
| 909 | #[inline (always)] |
| 910 | fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> { |
| 911 | // TODO(#67): Remove this allow. See NonNullExt for more details. |
| 912 | #[allow (unstable_name_collisions)] |
| 913 | NonNull::slice_from_raw_parts(data.cast::<T>(), elems) |
| 914 | } |
| 915 | |
| 916 | #[inline (always)] |
| 917 | fn pointer_to_metadata(ptr: *mut [T]) -> usize { |
| 918 | #[allow (clippy::as_conversions)] |
| 919 | let slc = ptr as *const [()]; |
| 920 | |
| 921 | // SAFETY: |
| 922 | // - `()` has alignment 1, so `slc` is trivially aligned. |
| 923 | // - `slc` was derived from a non-null pointer. |
| 924 | // - The size is 0 regardless of the length, so it is sound to |
| 925 | // materialize a reference regardless of location. |
| 926 | // - By invariant, `self.ptr` has valid provenance. |
| 927 | let slc = unsafe { &*slc }; |
| 928 | |
| 929 | // This is correct because the preceding `as` cast preserves the number |
| 930 | // of slice elements. [1] |
| 931 | // |
| 932 | // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast: |
| 933 | // |
| 934 | // For slice types like `[T]` and `[U]`, the raw pointer types `*const |
| 935 | // [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of |
| 936 | // elements in this slice. Casts between these raw pointer types |
| 937 | // preserve the number of elements. ... The same holds for `str` and |
| 938 | // any compound type whose unsized tail is a slice type, such as |
| 939 | // struct `Foo(i32, [u8])` or `(u64, Foo)`. |
| 940 | slc.len() |
| 941 | } |
| 942 | } |
| 943 | |
| 944 | #[rustfmt::skip] |
| 945 | impl_known_layout!( |
| 946 | (), |
| 947 | u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64, |
| 948 | bool, char, |
| 949 | NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, |
| 950 | NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize |
| 951 | ); |
| 952 | #[rustfmt::skip] |
| 953 | impl_known_layout!( |
| 954 | T => Option<T>, |
| 955 | T: ?Sized => PhantomData<T>, |
| 956 | T => Wrapping<T>, |
| 957 | T => CoreMaybeUninit<T>, |
| 958 | T: ?Sized => *const T, |
| 959 | T: ?Sized => *mut T, |
| 960 | T: ?Sized => &'_ T, |
| 961 | T: ?Sized => &'_ mut T, |
| 962 | ); |
| 963 | impl_known_layout!(const N: usize, T => [T; N]); |
| 964 | |
| 965 | safety_comment! { |
| 966 | /// SAFETY: |
| 967 | /// `str`, `ManuallyDrop<[T]>` [1], and `UnsafeCell<T>` [2] have the same |
| 968 | /// representations as `[u8]`, `[T]`, and `T` respectively. `str` has |
| 969 | /// different bit validity than `[u8]`, but that doesn't affect the |
| 970 | /// soundness of this impl. |
| 971 | /// |
| 972 | /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html: |
| 973 | /// |
| 974 | /// `ManuallyDrop<T>` is guaranteed to have the same layout and bit |
| 975 | /// validity as `T` |
| 976 | /// |
| 977 | /// [2] Per https://doc.rust-lang.org/core/cell/struct.UnsafeCell.html#memory-layout: |
| 978 | /// |
| 979 | /// `UnsafeCell<T>` has the same in-memory representation as its inner |
| 980 | /// type `T`. |
| 981 | /// |
| 982 | /// TODO(#429): |
| 983 | /// - Add quotes from docs. |
| 984 | /// - Once [1] (added in |
| 985 | /// https://github.com/rust-lang/rust/pull/115522) is available on stable, |
| 986 | /// quote the stable docs instead of the nightly docs. |
| 987 | unsafe_impl_known_layout!(#[repr([u8])] str); |
| 988 | unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>); |
| 989 | unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>); |
| 990 | } |
| 991 | |
| 992 | safety_comment! { |
| 993 | /// SAFETY: |
| 994 | /// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` |
| 995 | /// and `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` |
| 996 | /// have the same: |
| 997 | /// - Fixed prefix size |
| 998 | /// - Alignment |
| 999 | /// - (For DSTs) trailing slice element size |
| 1000 | /// - By consequence of the above, referents `T::MaybeUninit` and `T` require |
| 1001 | /// the same kind of pointer metadata, and thus it is valid to perform an |
| 1002 | /// `as` cast from `*mut T` to `*mut T::MaybeUninit`, and this operation |
| 1003 | /// preserves referent size (ie, `size_of_val_raw`). |
| 1004 | unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>); |
| 1005 | } |
| 1006 | |
| 1007 | /// Analyzes whether a type is [`FromZeros`]. |
| 1008 | /// |
| 1009 | /// This derive analyzes, at compile time, whether the annotated type satisfies |
| 1010 | /// the [safety conditions] of `FromZeros` and implements `FromZeros` and its |
| 1011 | /// supertraits if it is sound to do so. This derive can be applied to structs, |
| 1012 | /// enums, and unions; e.g.: |
| 1013 | /// |
| 1014 | /// ``` |
| 1015 | /// # use zerocopy_derive::{FromZeros, Immutable}; |
| 1016 | /// #[derive(FromZeros)] |
| 1017 | /// struct MyStruct { |
| 1018 | /// # /* |
| 1019 | /// ... |
| 1020 | /// # */ |
| 1021 | /// } |
| 1022 | /// |
| 1023 | /// #[derive(FromZeros)] |
| 1024 | /// #[repr(u8)] |
| 1025 | /// enum MyEnum { |
| 1026 | /// # Variant0, |
| 1027 | /// # /* |
| 1028 | /// ... |
| 1029 | /// # */ |
| 1030 | /// } |
| 1031 | /// |
| 1032 | /// #[derive(FromZeros, Immutable)] |
| 1033 | /// union MyUnion { |
| 1034 | /// # variant: u8, |
| 1035 | /// # /* |
| 1036 | /// ... |
| 1037 | /// # */ |
| 1038 | /// } |
| 1039 | /// ``` |
| 1040 | /// |
| 1041 | /// [safety conditions]: trait@FromZeros#safety |
| 1042 | /// |
| 1043 | /// # Analysis |
| 1044 | /// |
| 1045 | /// *This section describes, roughly, the analysis performed by this derive to |
| 1046 | /// determine whether it is sound to implement `FromZeros` for a given type. |
| 1047 | /// Unless you are modifying the implementation of this derive, or attempting to |
| 1048 | /// manually implement `FromZeros` for a type yourself, you don't need to read |
| 1049 | /// this section.* |
| 1050 | /// |
| 1051 | /// If a type has the following properties, then this derive can implement |
| 1052 | /// `FromZeros` for that type: |
| 1053 | /// |
| 1054 | /// - If the type is a struct, all of its fields must be `FromZeros`. |
| 1055 | /// - If the type is an enum: |
| 1056 | /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, |
| 1057 | /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). |
| 1058 | /// - It must have a variant with a discriminant/tag of `0`. See |
| 1059 | /// [the reference] for a description of how discriminant values |
| 1060 | /// are specified. |
| 1061 | /// - The fields of that variant must be `FromZeros`. |
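///
/// For example, a sketch of an enum that satisfies these conditions (the
/// variant names are illustrative):
///
/// ```
/// # use zerocopy_derive::*;
/// use zerocopy::FromZeros;
///
/// #[derive(FromZeros)]
/// #[repr(u8)]
/// enum Status {
///     // The variant with discriminant `0` is the one a zeroed `Status`
///     // takes on.
///     Idle = 0,
///     Busy = 1,
/// }
///
/// assert!(matches!(Status::new_zeroed(), Status::Idle));
/// ```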
| 1062 | /// |
| 1063 | /// This analysis is subject to change. Unsafe code may *only* rely on the |
| 1064 | /// documented [safety conditions] of `FromZeros`, and must *not* rely on the |
| 1065 | /// implementation details of this derive. |
| 1066 | /// |
| 1067 | /// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations |
| 1068 | /// |
| 1069 | /// ## Why isn't an explicit representation required for structs? |
| 1070 | /// |
| 1071 | /// Neither this derive, nor the [safety conditions] of `FromZeros`, requires |
| 1072 | /// that structs are marked with `#[repr(C)]`. |
| 1073 | /// |
| 1074 | /// Per the [Rust reference][reference], |
| 1075 | /// |
| 1076 | /// > The representation of a type can change the padding between fields, but |
| 1077 | /// > does not change the layout of the fields themselves. |
| 1078 | /// |
| 1079 | /// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations |
| 1080 | /// |
| 1081 | /// Since the layout of structs only consists of padding bytes and field bytes, |
| 1082 | /// a struct is soundly `FromZeros` if: |
| 1083 | /// 1. its padding is soundly `FromZeros`, and |
| 1084 | /// 2. its fields are soundly `FromZeros`. |
| 1085 | /// |
| 1086 | /// The answer to the first question is always yes: padding bytes do not have |
| 1087 | /// any validity constraints. A [discussion] of this question in the Unsafe Code |
| 1088 | /// Guidelines Working Group concluded that it would be virtually unimaginable |
| 1089 | /// for future versions of rustc to add validity constraints to padding bytes. |
| 1090 | /// |
| 1091 | /// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 |
| 1092 | /// |
| 1093 | /// Whether a struct is soundly `FromZeros` therefore solely depends on whether |
| 1094 | /// its fields are `FromZeros`. |
| 1095 | // TODO(#146): Document why we don't require an enum to have an explicit `repr` |
| 1096 | // attribute. |
| 1097 | #[cfg (any(feature = "derive" , test))] |
| 1098 | #[cfg_attr (doc_cfg, doc(cfg(feature = "derive" )))] |
| 1099 | pub use zerocopy_derive::FromZeros; |
| 1100 | |
| 1101 | /// Analyzes whether a type is [`Immutable`]. |
| 1102 | /// |
| 1103 | /// This derive analyzes, at compile time, whether the annotated type satisfies |
| 1104 | /// the [safety conditions] of `Immutable` and implements `Immutable` if it is |
| 1105 | /// sound to do so. This derive can be applied to structs, enums, and unions; |
| 1106 | /// e.g.: |
| 1107 | /// |
| 1108 | /// ``` |
| 1109 | /// # use zerocopy_derive::Immutable; |
| 1110 | /// #[derive(Immutable)] |
| 1111 | /// struct MyStruct { |
| 1112 | /// # /* |
| 1113 | /// ... |
| 1114 | /// # */ |
| 1115 | /// } |
| 1116 | /// |
| 1117 | /// #[derive(Immutable)] |
| 1118 | /// enum MyEnum { |
| 1119 | /// # Variant0, |
| 1120 | /// # /* |
| 1121 | /// ... |
| 1122 | /// # */ |
| 1123 | /// } |
| 1124 | /// |
| 1125 | /// #[derive(Immutable)] |
| 1126 | /// union MyUnion { |
| 1127 | /// # variant: u8, |
| 1128 | /// # /* |
| 1129 | /// ... |
| 1130 | /// # */ |
| 1131 | /// } |
| 1132 | /// ``` |
| 1133 | /// |
| 1134 | /// # Analysis |
| 1135 | /// |
| 1136 | /// *This section describes, roughly, the analysis performed by this derive to |
| 1137 | /// determine whether it is sound to implement `Immutable` for a given type. |
| 1138 | /// Unless you are modifying the implementation of this derive, you don't need |
| 1139 | /// to read this section.* |
| 1140 | /// |
| 1141 | /// If a type has the following properties, then this derive can implement |
| 1142 | /// `Immutable` for that type: |
| 1143 | /// |
| 1144 | /// - All fields must be `Immutable`. |
| 1145 | /// |
| 1146 | /// This analysis is subject to change. Unsafe code may *only* rely on the |
| 1147 | /// documented [safety conditions] of `Immutable`, and must *not* rely on the |
| 1148 | /// implementation details of this derive. |
| 1149 | /// |
| 1150 | /// [safety conditions]: trait@Immutable#safety |
| 1151 | #[cfg (any(feature = "derive" , test))] |
| 1152 | #[cfg_attr (doc_cfg, doc(cfg(feature = "derive" )))] |
| 1153 | pub use zerocopy_derive::Immutable; |
| 1154 | |
| 1155 | /// Types which are free from interior mutability. |
| 1156 | /// |
| 1157 | /// `T: Immutable` indicates that `T` does not permit interior mutation, except |
| 1158 | /// by ownership or an exclusive (`&mut`) borrow. |
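///
/// For example, `u8` and `[u8; 8]` are `Immutable`, while `AtomicU8` and
/// `Cell<u8>` are not. A sketch (the `assert_immutable` helper is purely
/// illustrative):
///
/// ```
/// use zerocopy::Immutable;
///
/// // Compiles only for types that are free of interior mutability.
/// fn assert_immutable<T: Immutable>() {}
///
/// assert_immutable::<u8>();
/// assert_immutable::<[u8; 8]>();
/// // assert_immutable::<core::cell::Cell<u8>>(); // would not compile
/// ```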
| 1159 | /// |
| 1160 | /// # Implementation |
| 1161 | /// |
| 1162 | /// **Do not implement this trait yourself!** Instead, use |
| 1163 | /// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature); |
| 1164 | /// e.g.: |
| 1165 | /// |
| 1166 | /// ``` |
| 1167 | /// # use zerocopy_derive::Immutable; |
| 1168 | /// #[derive(Immutable)] |
| 1169 | /// struct MyStruct { |
| 1170 | /// # /* |
| 1171 | /// ... |
| 1172 | /// # */ |
| 1173 | /// } |
| 1174 | /// |
| 1175 | /// #[derive(Immutable)] |
| 1176 | /// enum MyEnum { |
| 1177 | /// # /* |
| 1178 | /// ... |
| 1179 | /// # */ |
| 1180 | /// } |
| 1181 | /// |
| 1182 | /// #[derive(Immutable)] |
| 1183 | /// union MyUnion { |
| 1184 | /// # variant: u8, |
| 1185 | /// # /* |
| 1186 | /// ... |
| 1187 | /// # */ |
| 1188 | /// } |
| 1189 | /// ``` |
| 1190 | /// |
| 1191 | /// This derive performs a sophisticated, compile-time safety analysis to |
| 1192 | /// determine whether a type is `Immutable`. |
| 1193 | /// |
| 1194 | /// # Safety |
| 1195 | /// |
| 1196 | /// Unsafe code outside of this crate must not make any assumptions about `T` |
| 1197 | /// based on `T: Immutable`. We reserve the right to relax the requirements for |
| 1198 | /// `Immutable` in the future, and if unsafe code outside of this crate makes |
| 1199 | /// assumptions based on `T: Immutable`, future relaxations may cause that code |
| 1200 | /// to become unsound. |
| 1201 | /// |
| 1202 | // # Safety (Internal) |
| 1203 | // |
| 1204 | // If `T: Immutable`, unsafe code *inside of this crate* may assume that, given |
| 1205 | // `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location |
| 1206 | // within the byte range addressed by `t`. This includes ranges of length 0 |
| 1207 | // (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type implements |
| 1208 | // `Immutable` which violates this assumption, it may cause this crate to |
| 1209 | // exhibit [undefined behavior]. |
| 1210 | // |
| 1211 | // [`UnsafeCell`]: core::cell::UnsafeCell |
| 1212 | // [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html |
| 1213 | #[cfg_attr ( |
| 1214 | feature = "derive" , |
| 1215 | doc = "[derive]: zerocopy_derive::Immutable" , |
| 1216 | doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis" |
| 1217 | )] |
| 1218 | #[cfg_attr ( |
| 1219 | not(feature = "derive" ), |
| 1220 | doc = concat!("[derive]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.Immutable.html" ), |
| 1221 | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.Immutable.html#analysis" ), |
| 1222 | )] |
| 1223 | #[cfg_attr ( |
| 1224 | zerocopy_diagnostic_on_unimplemented_1_78_0, |
| 1225 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`" ) |
| 1226 | )] |
| 1227 | pub unsafe trait Immutable { |
| 1228 | // The `Self: Sized` bound makes it so that `Immutable` is still object |
| 1229 | // safe. |
| 1230 | #[doc (hidden)] |
| 1231 | fn only_derive_is_allowed_to_implement_this_trait() |
| 1232 | where |
| 1233 | Self: Sized; |
| 1234 | } |
| 1235 | |
| 1236 | /// Implements [`TryFromBytes`]. |
| 1237 | /// |
| 1238 | /// This derive synthesizes the runtime checks required to check whether a |
| 1239 | /// sequence of initialized bytes corresponds to a valid instance of a type. |
| 1240 | /// This derive can be applied to structs, enums, and unions; e.g.: |
| 1241 | /// |
| 1242 | /// ``` |
| 1243 | /// # use zerocopy_derive::{TryFromBytes, Immutable}; |
| 1244 | /// #[derive(TryFromBytes)] |
| 1245 | /// struct MyStruct { |
| 1246 | /// # /* |
| 1247 | /// ... |
| 1248 | /// # */ |
| 1249 | /// } |
| 1250 | /// |
| 1251 | /// #[derive(TryFromBytes)] |
| 1252 | /// #[repr(u8)] |
| 1253 | /// enum MyEnum { |
| 1254 | /// # V00, |
| 1255 | /// # /* |
| 1256 | /// ... |
| 1257 | /// # */ |
| 1258 | /// } |
| 1259 | /// |
| 1260 | /// #[derive(TryFromBytes, Immutable)] |
| 1261 | /// union MyUnion { |
| 1262 | /// # variant: u8, |
| 1263 | /// # /* |
| 1264 | /// ... |
| 1265 | /// # */ |
| 1266 | /// } |
| 1267 | /// ``` |
| 1268 | /// |
| 1269 | /// [safety conditions]: trait@TryFromBytes#safety |
| 1270 | #[cfg (any(feature = "derive" , test))] |
| 1271 | #[cfg_attr (doc_cfg, doc(cfg(feature = "derive" )))] |
| 1272 | pub use zerocopy_derive::TryFromBytes; |
| 1273 | |
| 1274 | /// Types for which some bit patterns are valid. |
| 1275 | /// |
| 1276 | /// A memory region of the appropriate length which contains initialized bytes |
| 1277 | /// can be viewed as a `TryFromBytes` type so long as the runtime value of those |
| 1278 | /// bytes corresponds to a [*valid instance*] of that type. For example, |
| 1279 | /// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a |
| 1280 | /// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or |
| 1281 | /// `1`. |
| 1282 | /// |
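/// For example, a minimal sketch of the `u8`-to-`bool` conversion described
/// above:
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // `0` and `1` are the only valid `bool`s; any other byte is rejected.
/// assert_eq!(bool::try_read_from_bytes(&[1u8][..]).unwrap(), true);
/// assert!(bool::try_read_from_bytes(&[2u8][..]).is_err());
/// ```
///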
| 1283 | /// # Implementation |
| 1284 | /// |
| 1285 | /// **Do not implement this trait yourself!** Instead, use |
| 1286 | /// [`#[derive(TryFromBytes)]`][derive]; e.g.: |
| 1287 | /// |
| 1288 | /// ``` |
| 1289 | /// # use zerocopy_derive::{TryFromBytes, Immutable}; |
| 1290 | /// #[derive(TryFromBytes)] |
| 1291 | /// struct MyStruct { |
| 1292 | /// # /* |
| 1293 | /// ... |
| 1294 | /// # */ |
| 1295 | /// } |
| 1296 | /// |
| 1297 | /// #[derive(TryFromBytes)] |
| 1298 | /// #[repr(u8)] |
| 1299 | /// enum MyEnum { |
| 1300 | /// # V00, |
| 1301 | /// # /* |
| 1302 | /// ... |
| 1303 | /// # */ |
| 1304 | /// } |
| 1305 | /// |
| 1306 | /// #[derive(TryFromBytes, Immutable)] |
| 1307 | /// union MyUnion { |
| 1308 | /// # variant: u8, |
| 1309 | /// # /* |
| 1310 | /// ... |
| 1311 | /// # */ |
| 1312 | /// } |
| 1313 | /// ``` |
| 1314 | /// |
| 1315 | /// This derive ensures that the runtime check of whether bytes correspond to a |
| 1316 | /// valid instance is sound. You **must** implement this trait via the derive. |
| 1317 | /// |
| 1318 | /// # What is a "valid instance"? |
| 1319 | /// |
| 1320 | /// In Rust, each type has *bit validity*, which refers to the set of bit |
| 1321 | /// patterns which may appear in an instance of that type. It is impossible for |
/// safe Rust code to produce values which violate bit validity (i.e., values
| 1323 | /// outside of the "valid" set of bit patterns). If `unsafe` code produces an |
| 1324 | /// invalid value, this is considered [undefined behavior]. |
| 1325 | /// |
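/// For instance, not every `u32` bit pattern is a valid `char`: surrogate code
/// points are excluded. A minimal sketch of how `TryFromBytes` surfaces this at
/// runtime:
///
/// ```
/// use zerocopy::TryFromBytes;
///
/// // `0x41` is `'A'`, but `0xD800` is a surrogate, which is not a valid `char`.
/// assert_eq!(char::try_read_from_bytes(&0x41u32.to_ne_bytes()[..]).unwrap(), 'A');
/// assert!(char::try_read_from_bytes(&0xD800u32.to_ne_bytes()[..]).is_err());
/// ```
///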
| 1326 | /// Rust's bit validity rules are currently being decided, which means that some |
| 1327 | /// types have three classes of bit patterns: those which are definitely valid, |
| 1328 | /// and whose validity is documented in the language; those which may or may not |
| 1329 | /// be considered valid at some point in the future; and those which are |
| 1330 | /// definitely invalid. |
| 1331 | /// |
| 1332 | /// Zerocopy takes a conservative approach, and only considers a bit pattern to |
/// be valid if its validity is a documented guarantee provided by the
| 1334 | /// language. |
| 1335 | /// |
| 1336 | /// For most use cases, Rust's current guarantees align with programmers' |
| 1337 | /// intuitions about what ought to be valid. As a result, zerocopy's |
| 1338 | /// conservatism should not affect most users. |
| 1339 | /// |
| 1340 | /// If you are negatively affected by lack of support for a particular type, |
| 1341 | /// we encourage you to let us know by [filing an issue][github-repo]. |
| 1342 | /// |
| 1343 | /// # `TryFromBytes` is not symmetrical with [`IntoBytes`] |
| 1344 | /// |
| 1345 | /// There are some types which implement both `TryFromBytes` and [`IntoBytes`], |
| 1346 | /// but for which `TryFromBytes` is not guaranteed to accept all byte sequences |
| 1347 | /// produced by `IntoBytes`. In other words, for some `T: TryFromBytes + |
/// IntoBytes`, there exist values `t: T` such that
/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()).is_err()`. Code should not
/// generally assume that values produced by `IntoBytes` will necessarily be
/// accepted as valid by `TryFromBytes`.
| 1352 | /// |
| 1353 | /// # Safety |
| 1354 | /// |
| 1355 | /// On its own, `T: TryFromBytes` does not make any guarantees about the layout |
| 1356 | /// or representation of `T`. It merely provides the ability to perform a |
| 1357 | /// validity check at runtime via methods like [`try_ref_from_bytes`]. |
| 1358 | /// |
| 1359 | /// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`. |
| 1360 | /// Future releases of zerocopy may make backwards-breaking changes to these |
| 1361 | /// items, including changes that only affect soundness, which may cause code |
| 1362 | /// which uses those items to silently become unsound. |
| 1363 | /// |
| 1364 | /// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html |
| 1365 | /// [github-repo]: https://github.com/google/zerocopy |
| 1366 | /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes |
| 1367 | /// [*valid instance*]: #what-is-a-valid-instance |
| 1368 | #[cfg_attr (feature = "derive" , doc = "[derive]: zerocopy_derive::TryFromBytes" )] |
| 1369 | #[cfg_attr ( |
| 1370 | not(feature = "derive" ), |
| 1371 | doc = concat!("[derive]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.TryFromBytes.html" ), |
| 1372 | )] |
| 1373 | #[cfg_attr ( |
| 1374 | zerocopy_diagnostic_on_unimplemented_1_78_0, |
| 1375 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`" ) |
| 1376 | )] |
| 1377 | pub unsafe trait TryFromBytes { |
| 1378 | // The `Self: Sized` bound makes it so that `TryFromBytes` is still object |
| 1379 | // safe. |
| 1380 | #[doc (hidden)] |
| 1381 | fn only_derive_is_allowed_to_implement_this_trait() |
| 1382 | where |
| 1383 | Self: Sized; |
| 1384 | |
| 1385 | /// Does a given memory range contain a valid instance of `Self`? |
| 1386 | /// |
| 1387 | /// # Safety |
| 1388 | /// |
| 1389 | /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true, |
| 1390 | /// `*candidate` contains a valid `Self`. |
| 1391 | /// |
| 1392 | /// # Panics |
| 1393 | /// |
| 1394 | /// `is_bit_valid` may panic. Callers are responsible for ensuring that any |
| 1395 | /// `unsafe` code remains sound even in the face of `is_bit_valid` |
| 1396 | /// panicking. (We support user-defined validation routines; so long as |
| 1397 | /// these routines are not required to be `unsafe`, there is no way to |
| 1398 | /// ensure that these do not generate panics.) |
| 1399 | /// |
| 1400 | /// Besides user-defined validation routines panicking, `is_bit_valid` will |
| 1401 | /// either panic or fail to compile if called on a pointer with [`Shared`] |
| 1402 | /// aliasing when `Self: !Immutable`. |
| 1403 | /// |
| 1404 | /// [`UnsafeCell`]: core::cell::UnsafeCell |
| 1405 | /// [`Shared`]: invariant::Shared |
| 1406 | #[doc (hidden)] |
| 1407 | fn is_bit_valid<A: invariant::Aliasing + invariant::AtLeast<invariant::Shared>>( |
| 1408 | candidate: Maybe<'_, Self, A>, |
| 1409 | ) -> bool; |
| 1410 | |
| 1411 | /// Attempts to interpret the given `source` as a `&Self`. |
| 1412 | /// |
| 1413 | /// If the bytes of `source` are a valid instance of `Self`, this method |
| 1414 | /// returns a reference to those bytes interpreted as a `Self`. If the |
| 1415 | /// length of `source` is not a [valid size of `Self`][valid-size], or if |
| 1416 | /// `source` is not appropriately aligned, or if `source` is not a valid |
| 1417 | /// instance of `Self`, this returns `Err`. If [`Self: |
| 1418 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 1419 | /// error][ConvertError::from]. |
| 1420 | /// |
| 1421 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1422 | /// |
| 1423 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1424 | /// [self-unaligned]: Unaligned |
| 1425 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1426 | /// |
| 1427 | /// # Compile-Time Assertions |
| 1428 | /// |
| 1429 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1430 | /// component is zero-sized. Attempting to use this method on such types |
| 1431 | /// results in a compile-time assertion error; e.g.: |
| 1432 | /// |
| 1433 | /// ```compile_fail,E0080 |
| 1434 | /// use zerocopy::*; |
| 1435 | /// # use zerocopy_derive::*; |
| 1436 | /// |
| 1437 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 1438 | /// #[repr(C)] |
| 1439 | /// struct ZSTy { |
| 1440 | /// leading_sized: u16, |
| 1441 | /// trailing_dst: [()], |
| 1442 | /// } |
| 1443 | /// |
/// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
| 1445 | /// ``` |
| 1446 | /// |
| 1447 | /// # Examples |
| 1448 | /// |
| 1449 | /// ``` |
| 1450 | /// use zerocopy::TryFromBytes; |
| 1451 | /// # use zerocopy_derive::*; |
| 1452 | /// |
| 1453 | /// // The only valid value of this type is the byte `0xC0` |
| 1454 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1455 | /// #[repr(u8)] |
| 1456 | /// enum C0 { xC0 = 0xC0 } |
| 1457 | /// |
| 1458 | /// // The only valid value of this type is the byte sequence `0xC0C0`. |
| 1459 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1460 | /// #[repr(C)] |
| 1461 | /// struct C0C0(C0, C0); |
| 1462 | /// |
| 1463 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1464 | /// #[repr(C)] |
| 1465 | /// struct Packet { |
| 1466 | /// magic_number: C0C0, |
| 1467 | /// mug_size: u8, |
| 1468 | /// temperature: u8, |
| 1469 | /// marshmallows: [[u8; 2]], |
| 1470 | /// } |
| 1471 | /// |
| 1472 | /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; |
| 1473 | /// |
| 1474 | /// let packet = Packet::try_ref_from_bytes(bytes).unwrap(); |
| 1475 | /// |
| 1476 | /// assert_eq!(packet.mug_size, 240); |
| 1477 | /// assert_eq!(packet.temperature, 77); |
| 1478 | /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); |
| 1479 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1481 | /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; |
| 1482 | /// assert!(Packet::try_ref_from_bytes(bytes).is_err()); |
| 1483 | /// ``` |
| 1484 | #[must_use = "has no side effects" ] |
| 1485 | #[inline ] |
| 1486 | fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>> |
| 1487 | where |
| 1488 | Self: KnownLayout + Immutable, |
| 1489 | { |
| 1490 | static_assert_dst_is_not_zst!(Self); |
| 1491 | match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) { |
| 1492 | Ok(source) => { |
| 1493 | // This call may panic. If that happens, it doesn't cause any soundness |
| 1494 | // issues, as we have not generated any invalid state which we need to |
| 1495 | // fix before returning. |
| 1496 | // |
| 1497 | // Note that one panic or post-monomorphization error condition is |
| 1498 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
| 1499 | // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic |
| 1500 | // condition will not happen. |
| 1501 | match source.try_into_valid() { |
| 1502 | Ok(valid) => Ok(valid.as_ref()), |
| 1503 | Err(e) => { |
| 1504 | Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()) |
| 1505 | } |
| 1506 | } |
| 1507 | } |
| 1508 | Err(e) => Err(e.map_src(Ptr::as_ref).into()), |
| 1509 | } |
| 1510 | } |
| 1511 | |
| 1512 | /// Attempts to interpret the prefix of the given `source` as a `&Self`. |
| 1513 | /// |
| 1514 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 1515 | /// that can fit in the leading bytes of `source`. If that prefix is a valid |
| 1516 | /// instance of `Self`, this method returns a reference to those bytes |
| 1517 | /// interpreted as `Self`, and a reference to the remaining bytes. If there |
| 1518 | /// are insufficient bytes, or if `source` is not appropriately aligned, or |
| 1519 | /// if those bytes are not a valid instance of `Self`, this returns `Err`. |
| 1520 | /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 1521 | /// alignment error][ConvertError::from]. |
| 1522 | /// |
| 1523 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1524 | /// |
| 1525 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1526 | /// [self-unaligned]: Unaligned |
| 1527 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1528 | /// |
| 1529 | /// # Compile-Time Assertions |
| 1530 | /// |
| 1531 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1532 | /// component is zero-sized. Attempting to use this method on such types |
| 1533 | /// results in a compile-time assertion error; e.g.: |
| 1534 | /// |
| 1535 | /// ```compile_fail,E0080 |
| 1536 | /// use zerocopy::*; |
| 1537 | /// # use zerocopy_derive::*; |
| 1538 | /// |
| 1539 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 1540 | /// #[repr(C)] |
| 1541 | /// struct ZSTy { |
| 1542 | /// leading_sized: u16, |
| 1543 | /// trailing_dst: [()], |
| 1544 | /// } |
| 1545 | /// |
/// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
| 1547 | /// ``` |
| 1548 | /// |
| 1549 | /// # Examples |
| 1550 | /// |
| 1551 | /// ``` |
| 1552 | /// use zerocopy::TryFromBytes; |
| 1553 | /// # use zerocopy_derive::*; |
| 1554 | /// |
| 1555 | /// // The only valid value of this type is the byte `0xC0` |
| 1556 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1557 | /// #[repr(u8)] |
| 1558 | /// enum C0 { xC0 = 0xC0 } |
| 1559 | /// |
| 1560 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 1561 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1562 | /// #[repr(C)] |
| 1563 | /// struct C0C0(C0, C0); |
| 1564 | /// |
| 1565 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1566 | /// #[repr(C)] |
| 1567 | /// struct Packet { |
| 1568 | /// magic_number: C0C0, |
| 1569 | /// mug_size: u8, |
| 1570 | /// temperature: u8, |
| 1571 | /// marshmallows: [[u8; 2]], |
| 1572 | /// } |
| 1573 | /// |
| 1574 | /// // These are more bytes than are needed to encode a `Packet`. |
| 1575 | /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 1576 | /// |
| 1577 | /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap(); |
| 1578 | /// |
| 1579 | /// assert_eq!(packet.mug_size, 240); |
| 1580 | /// assert_eq!(packet.temperature, 77); |
| 1581 | /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); |
| 1582 | /// assert_eq!(suffix, &[6u8][..]); |
| 1583 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1585 | /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 1586 | /// assert!(Packet::try_ref_from_prefix(bytes).is_err()); |
| 1587 | /// ``` |
| 1588 | #[must_use = "has no side effects" ] |
| 1589 | #[inline ] |
| 1590 | fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>> |
| 1591 | where |
| 1592 | Self: KnownLayout + Immutable, |
| 1593 | { |
| 1594 | static_assert_dst_is_not_zst!(Self); |
| 1595 | try_ref_from_prefix_suffix(source, CastType::Prefix, None) |
| 1596 | } |
| 1597 | |
| 1598 | /// Attempts to interpret the suffix of the given `source` as a `&Self`. |
| 1599 | /// |
| 1600 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 1601 | /// that can fit in the trailing bytes of `source`. If that suffix is a |
| 1602 | /// valid instance of `Self`, this method returns a reference to those bytes |
| 1603 | /// interpreted as `Self`, and a reference to the preceding bytes. If there |
| 1604 | /// are insufficient bytes, or if the suffix of `source` would not be |
| 1605 | /// appropriately aligned, or if the suffix is not a valid instance of |
| 1606 | /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you |
| 1607 | /// can [infallibly discard the alignment error][ConvertError::from]. |
| 1608 | /// |
| 1609 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1610 | /// |
| 1611 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1612 | /// [self-unaligned]: Unaligned |
| 1613 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1614 | /// |
| 1615 | /// # Compile-Time Assertions |
| 1616 | /// |
| 1617 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1618 | /// component is zero-sized. Attempting to use this method on such types |
| 1619 | /// results in a compile-time assertion error; e.g.: |
| 1620 | /// |
| 1621 | /// ```compile_fail,E0080 |
| 1622 | /// use zerocopy::*; |
| 1623 | /// # use zerocopy_derive::*; |
| 1624 | /// |
| 1625 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 1626 | /// #[repr(C)] |
| 1627 | /// struct ZSTy { |
| 1628 | /// leading_sized: u16, |
| 1629 | /// trailing_dst: [()], |
| 1630 | /// } |
| 1631 | /// |
/// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
| 1633 | /// ``` |
| 1634 | /// |
| 1635 | /// # Examples |
| 1636 | /// |
| 1637 | /// ``` |
| 1638 | /// use zerocopy::TryFromBytes; |
| 1639 | /// # use zerocopy_derive::*; |
| 1640 | /// |
| 1641 | /// // The only valid value of this type is the byte `0xC0` |
| 1642 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1643 | /// #[repr(u8)] |
| 1644 | /// enum C0 { xC0 = 0xC0 } |
| 1645 | /// |
| 1646 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 1647 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1648 | /// #[repr(C)] |
| 1649 | /// struct C0C0(C0, C0); |
| 1650 | /// |
| 1651 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 1652 | /// #[repr(C)] |
| 1653 | /// struct Packet { |
| 1654 | /// magic_number: C0C0, |
| 1655 | /// mug_size: u8, |
| 1656 | /// temperature: u8, |
| 1657 | /// marshmallows: [[u8; 2]], |
| 1658 | /// } |
| 1659 | /// |
| 1660 | /// // These are more bytes than are needed to encode a `Packet`. |
| 1661 | /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 1662 | /// |
| 1663 | /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap(); |
| 1664 | /// |
| 1665 | /// assert_eq!(packet.mug_size, 240); |
| 1666 | /// assert_eq!(packet.temperature, 77); |
| 1667 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 1668 | /// assert_eq!(prefix, &[0u8][..]); |
| 1669 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1671 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..]; |
| 1672 | /// assert!(Packet::try_ref_from_suffix(bytes).is_err()); |
| 1673 | /// ``` |
| 1674 | #[must_use = "has no side effects" ] |
| 1675 | #[inline ] |
| 1676 | fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>> |
| 1677 | where |
| 1678 | Self: KnownLayout + Immutable, |
| 1679 | { |
| 1680 | static_assert_dst_is_not_zst!(Self); |
| 1681 | try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap) |
| 1682 | } |
| 1683 | |
| 1684 | /// Attempts to interpret the given `source` as a `&mut Self` without |
| 1685 | /// copying. |
| 1686 | /// |
| 1687 | /// If the bytes of `source` are a valid instance of `Self`, this method |
| 1688 | /// returns a reference to those bytes interpreted as a `Self`. If the |
| 1689 | /// length of `source` is not a [valid size of `Self`][valid-size], or if |
| 1690 | /// `source` is not appropriately aligned, or if `source` is not a valid |
| 1691 | /// instance of `Self`, this returns `Err`. If [`Self: |
| 1692 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 1693 | /// error][ConvertError::from]. |
| 1694 | /// |
| 1695 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1696 | /// |
| 1697 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1698 | /// [self-unaligned]: Unaligned |
| 1699 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1700 | /// |
| 1701 | /// # Compile-Time Assertions |
| 1702 | /// |
| 1703 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1704 | /// component is zero-sized. Attempting to use this method on such types |
| 1705 | /// results in a compile-time assertion error; e.g.: |
| 1706 | /// |
| 1707 | /// ```compile_fail,E0080 |
| 1708 | /// use zerocopy::*; |
| 1709 | /// # use zerocopy_derive::*; |
| 1710 | /// |
| 1711 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1712 | /// #[repr(C)] |
| 1713 | /// struct ZSTy { |
| 1714 | /// leading_sized: [u8; 2], |
| 1715 | /// trailing_dst: [()], |
| 1716 | /// } |
| 1717 | /// |
| 1718 | /// let mut source = [85, 85]; |
/// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
| 1720 | /// ``` |
| 1721 | /// |
| 1722 | /// # Examples |
| 1723 | /// |
| 1724 | /// ``` |
| 1725 | /// use zerocopy::TryFromBytes; |
| 1726 | /// # use zerocopy_derive::*; |
| 1727 | /// |
| 1728 | /// // The only valid value of this type is the byte `0xC0` |
| 1729 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1730 | /// #[repr(u8)] |
| 1731 | /// enum C0 { xC0 = 0xC0 } |
| 1732 | /// |
| 1733 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 1734 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1735 | /// #[repr(C)] |
| 1736 | /// struct C0C0(C0, C0); |
| 1737 | /// |
| 1738 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1739 | /// #[repr(C)] |
| 1740 | /// struct Packet { |
| 1741 | /// magic_number: C0C0, |
| 1742 | /// mug_size: u8, |
| 1743 | /// temperature: u8, |
| 1744 | /// marshmallows: [[u8; 2]], |
| 1745 | /// } |
| 1746 | /// |
| 1747 | /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..]; |
| 1748 | /// |
| 1749 | /// let packet = Packet::try_mut_from_bytes(bytes).unwrap(); |
| 1750 | /// |
| 1751 | /// assert_eq!(packet.mug_size, 240); |
| 1752 | /// assert_eq!(packet.temperature, 77); |
| 1753 | /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); |
| 1754 | /// |
| 1755 | /// packet.temperature = 111; |
| 1756 | /// |
| 1757 | /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]); |
| 1758 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1760 | /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 1761 | /// assert!(Packet::try_mut_from_bytes(bytes).is_err()); |
| 1762 | /// ``` |
| 1763 | #[must_use = "has no side effects" ] |
| 1764 | #[inline ] |
| 1765 | fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>> |
| 1766 | where |
| 1767 | Self: KnownLayout, |
| 1768 | { |
| 1769 | static_assert_dst_is_not_zst!(Self); |
| 1770 | match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) { |
| 1771 | Ok(source) => { |
| 1772 | // This call may panic. If that happens, it doesn't cause any soundness |
| 1773 | // issues, as we have not generated any invalid state which we need to |
| 1774 | // fix before returning. |
| 1775 | // |
| 1776 | // Note that one panic or post-monomorphization error condition is |
| 1777 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
| 1778 | // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic |
| 1779 | // condition will not happen. |
| 1780 | match source.try_into_valid() { |
| 1781 | Ok(source) => Ok(source.as_mut()), |
| 1782 | Err(e) => { |
| 1783 | Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()) |
| 1784 | } |
| 1785 | } |
| 1786 | } |
| 1787 | Err(e) => Err(e.map_src(Ptr::as_mut).into()), |
| 1788 | } |
| 1789 | } |
| 1790 | |
| 1791 | /// Attempts to interpret the prefix of the given `source` as a `&mut |
| 1792 | /// Self`. |
| 1793 | /// |
| 1794 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 1795 | /// that can fit in the leading bytes of `source`. If that prefix is a valid |
| 1796 | /// instance of `Self`, this method returns a reference to those bytes |
| 1797 | /// interpreted as `Self`, and a reference to the remaining bytes. If there |
| 1798 | /// are insufficient bytes, or if `source` is not appropriately aligned, or |
| 1799 | /// if the bytes are not a valid instance of `Self`, this returns `Err`. If |
| 1800 | /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 1801 | /// alignment error][ConvertError::from]. |
| 1802 | /// |
| 1803 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1804 | /// |
| 1805 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1806 | /// [self-unaligned]: Unaligned |
| 1807 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1808 | /// |
| 1809 | /// # Compile-Time Assertions |
| 1810 | /// |
| 1811 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1812 | /// component is zero-sized. Attempting to use this method on such types |
| 1813 | /// results in a compile-time assertion error; e.g.: |
| 1814 | /// |
| 1815 | /// ```compile_fail,E0080 |
| 1816 | /// use zerocopy::*; |
| 1817 | /// # use zerocopy_derive::*; |
| 1818 | /// |
| 1819 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1820 | /// #[repr(C)] |
| 1821 | /// struct ZSTy { |
| 1822 | /// leading_sized: [u8; 2], |
| 1823 | /// trailing_dst: [()], |
| 1824 | /// } |
| 1825 | /// |
| 1826 | /// let mut source = [85, 85]; |
/// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
| 1828 | /// ``` |
| 1829 | /// |
| 1830 | /// # Examples |
| 1831 | /// |
| 1832 | /// ``` |
| 1833 | /// use zerocopy::TryFromBytes; |
| 1834 | /// # use zerocopy_derive::*; |
| 1835 | /// |
| 1836 | /// // The only valid value of this type is the byte `0xC0` |
| 1837 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1838 | /// #[repr(u8)] |
| 1839 | /// enum C0 { xC0 = 0xC0 } |
| 1840 | /// |
| 1841 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 1842 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1843 | /// #[repr(C)] |
| 1844 | /// struct C0C0(C0, C0); |
| 1845 | /// |
| 1846 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1847 | /// #[repr(C)] |
| 1848 | /// struct Packet { |
| 1849 | /// magic_number: C0C0, |
| 1850 | /// mug_size: u8, |
| 1851 | /// temperature: u8, |
| 1852 | /// marshmallows: [[u8; 2]], |
| 1853 | /// } |
| 1854 | /// |
| 1855 | /// // These are more bytes than are needed to encode a `Packet`. |
| 1856 | /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 1857 | /// |
| 1858 | /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap(); |
| 1859 | /// |
| 1860 | /// assert_eq!(packet.mug_size, 240); |
| 1861 | /// assert_eq!(packet.temperature, 77); |
| 1862 | /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]); |
| 1863 | /// assert_eq!(suffix, &[6u8][..]); |
| 1864 | /// |
| 1865 | /// packet.temperature = 111; |
| 1866 | /// suffix[0] = 222; |
| 1867 | /// |
| 1868 | /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]); |
| 1869 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1871 | /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 1872 | /// assert!(Packet::try_mut_from_prefix(bytes).is_err()); |
| 1873 | /// ``` |
| 1874 | #[must_use = "has no side effects" ] |
| 1875 | #[inline ] |
| 1876 | fn try_mut_from_prefix( |
| 1877 | source: &mut [u8], |
| 1878 | ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>> |
| 1879 | where |
| 1880 | Self: KnownLayout, |
| 1881 | { |
| 1882 | static_assert_dst_is_not_zst!(Self); |
| 1883 | try_mut_from_prefix_suffix(source, CastType::Prefix, None) |
| 1884 | } |
| 1885 | |
| 1886 | /// Attempts to interpret the suffix of the given `source` as a `&mut |
| 1887 | /// Self`. |
| 1888 | /// |
| 1889 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 1890 | /// that can fit in the trailing bytes of `source`. If that suffix is a |
| 1891 | /// valid instance of `Self`, this method returns a reference to those bytes |
| 1892 | /// interpreted as `Self`, and a reference to the preceding bytes. If there |
| 1893 | /// are insufficient bytes, or if the suffix of `source` would not be |
| 1894 | /// appropriately aligned, or if the suffix is not a valid instance of |
| 1895 | /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you |
| 1896 | /// can [infallibly discard the alignment error][ConvertError::from]. |
| 1897 | /// |
| 1898 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 1899 | /// |
| 1900 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 1901 | /// [self-unaligned]: Unaligned |
| 1902 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1903 | /// |
| 1904 | /// # Compile-Time Assertions |
| 1905 | /// |
| 1906 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 1907 | /// component is zero-sized. Attempting to use this method on such types |
| 1908 | /// results in a compile-time assertion error; e.g.: |
| 1909 | /// |
| 1910 | /// ```compile_fail,E0080 |
| 1911 | /// use zerocopy::*; |
| 1912 | /// # use zerocopy_derive::*; |
| 1913 | /// |
| 1914 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1915 | /// #[repr(C)] |
| 1916 | /// struct ZSTy { |
| 1917 | /// leading_sized: u16, |
| 1918 | /// trailing_dst: [()], |
| 1919 | /// } |
| 1920 | /// |
| 1921 | /// let mut source = [85, 85]; |
/// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
| 1923 | /// ``` |
| 1924 | /// |
| 1925 | /// # Examples |
| 1926 | /// |
| 1927 | /// ``` |
| 1928 | /// use zerocopy::TryFromBytes; |
| 1929 | /// # use zerocopy_derive::*; |
| 1930 | /// |
| 1931 | /// // The only valid value of this type is the byte `0xC0` |
| 1932 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1933 | /// #[repr(u8)] |
| 1934 | /// enum C0 { xC0 = 0xC0 } |
| 1935 | /// |
| 1936 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 1937 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1938 | /// #[repr(C)] |
| 1939 | /// struct C0C0(C0, C0); |
| 1940 | /// |
| 1941 | /// #[derive(TryFromBytes, KnownLayout)] |
| 1942 | /// #[repr(C)] |
| 1943 | /// struct Packet { |
| 1944 | /// magic_number: C0C0, |
| 1945 | /// mug_size: u8, |
| 1946 | /// temperature: u8, |
| 1947 | /// marshmallows: [[u8; 2]], |
| 1948 | /// } |
| 1949 | /// |
| 1950 | /// // These are more bytes than are needed to encode a `Packet`. |
| 1951 | /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 1952 | /// |
| 1953 | /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap(); |
| 1954 | /// |
| 1955 | /// assert_eq!(packet.mug_size, 240); |
| 1956 | /// assert_eq!(packet.temperature, 77); |
| 1957 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 1958 | /// assert_eq!(prefix, &[0u8][..]); |
| 1959 | /// |
| 1960 | /// prefix[0] = 111; |
| 1961 | /// packet.temperature = 222; |
| 1962 | /// |
| 1963 | /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]); |
| 1964 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 1966 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..]; |
| 1967 | /// assert!(Packet::try_mut_from_suffix(bytes).is_err()); |
| 1968 | /// ``` |
| 1969 | #[must_use = "has no side effects" ] |
| 1970 | #[inline ] |
| 1971 | fn try_mut_from_suffix( |
| 1972 | source: &mut [u8], |
| 1973 | ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>> |
| 1974 | where |
| 1975 | Self: KnownLayout, |
| 1976 | { |
| 1977 | static_assert_dst_is_not_zst!(Self); |
| 1978 | try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap) |
| 1979 | } |
| 1980 | |
| 1981 | /// Attempts to interpret the given `source` as a `&Self` with a DST length |
| 1982 | /// equal to `count`. |
| 1983 | /// |
| 1984 | /// This method attempts to return a reference to `source` interpreted as a |
| 1985 | /// `Self` with `count` trailing elements. If the length of `source` is not |
| 1986 | /// equal to the size of `Self` with `count` elements, if `source` is not |
| 1987 | /// appropriately aligned, or if `source` does not contain a valid instance |
| 1988 | /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], |
| 1989 | /// you can [infallibly discard the alignment error][ConvertError::from]. |
| 1990 | /// |
| 1991 | /// [self-unaligned]: Unaligned |
| 1992 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 1993 | /// |
| 1994 | /// # Examples |
| 1995 | /// |
| 1996 | /// ``` |
| 1997 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 1998 | /// use zerocopy::TryFromBytes; |
| 1999 | /// # use zerocopy_derive::*; |
| 2000 | /// |
| 2001 | /// // The only valid value of this type is the byte `0xC0` |
| 2002 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2003 | /// #[repr(u8)] |
| 2004 | /// enum C0 { xC0 = 0xC0 } |
| 2005 | /// |
| 2006 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2007 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2008 | /// #[repr(C)] |
| 2009 | /// struct C0C0(C0, C0); |
| 2010 | /// |
| 2011 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2012 | /// #[repr(C)] |
| 2013 | /// struct Packet { |
| 2014 | /// magic_number: C0C0, |
| 2015 | /// mug_size: u8, |
| 2016 | /// temperature: u8, |
| 2017 | /// marshmallows: [[u8; 2]], |
| 2018 | /// } |
| 2019 | /// |
| 2020 | /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 2021 | /// |
| 2022 | /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap(); |
| 2023 | /// |
| 2024 | /// assert_eq!(packet.mug_size, 240); |
| 2025 | /// assert_eq!(packet.temperature, 77); |
| 2026 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2027 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2029 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..]; |
| 2030 | /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err()); |
| 2031 | /// ``` |
| 2032 | /// |
| 2033 | /// Since an explicit `count` is provided, this method supports types with |
| 2034 | /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`] |
| 2035 | /// which do not take an explicit count do not support such types. |
| 2036 | /// |
| 2037 | /// ``` |
| 2038 | /// use core::num::NonZeroU16; |
| 2039 | /// use zerocopy::*; |
| 2040 | /// # use zerocopy_derive::*; |
| 2041 | /// |
| 2042 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 2043 | /// #[repr(C)] |
| 2044 | /// struct ZSTy { |
| 2045 | /// leading_sized: NonZeroU16, |
| 2046 | /// trailing_dst: [()], |
| 2047 | /// } |
| 2048 | /// |
| 2049 | /// let src = 0xCAFEu16.as_bytes(); |
| 2050 | /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap(); |
| 2051 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2052 | /// ``` |
| 2053 | /// |
| 2054 | /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes |
| 2055 | #[must_use = "has no side effects" ] |
| 2056 | #[inline ] |
| 2057 | fn try_ref_from_bytes_with_elems( |
| 2058 | source: &[u8], |
| 2059 | count: usize, |
| 2060 | ) -> Result<&Self, TryCastError<&[u8], Self>> |
| 2061 | where |
| 2062 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 2063 | { |
| 2064 | match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count)) |
| 2065 | { |
| 2066 | Ok(source) => { |
| 2067 | // This call may panic. If that happens, it doesn't cause any soundness |
| 2068 | // issues, as we have not generated any invalid state which we need to |
| 2069 | // fix before returning. |
| 2070 | // |
| 2071 | // Note that one panic or post-monomorphization error condition is |
| 2072 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
| 2073 | // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic |
| 2074 | // condition will not happen. |
| 2075 | match source.try_into_valid() { |
| 2076 | Ok(source) => Ok(source.as_ref()), |
| 2077 | Err(e) => { |
| 2078 | Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()) |
| 2079 | } |
| 2080 | } |
| 2081 | } |
| 2082 | Err(e) => Err(e.map_src(Ptr::as_ref).into()), |
| 2083 | } |
| 2084 | } |
| 2085 | |
| 2086 | /// Attempts to interpret the prefix of the given `source` as a `&Self` with |
| 2087 | /// a DST length equal to `count`. |
| 2088 | /// |
| 2089 | /// This method attempts to return a reference to the prefix of `source` |
| 2090 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 2091 | /// to the remaining bytes. If the length of `source` is less than the size |
| 2092 | /// of `Self` with `count` elements, if `source` is not appropriately |
| 2093 | /// aligned, or if the prefix of `source` does not contain a valid instance |
| 2094 | /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], |
| 2095 | /// you can [infallibly discard the alignment error][ConvertError::from]. |
| 2096 | /// |
| 2097 | /// [self-unaligned]: Unaligned |
| 2098 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 2099 | /// |
| 2100 | /// # Examples |
| 2101 | /// |
| 2102 | /// ``` |
| 2103 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2104 | /// use zerocopy::TryFromBytes; |
| 2105 | /// # use zerocopy_derive::*; |
| 2106 | /// |
| 2107 | /// // The only valid value of this type is the byte `0xC0` |
| 2108 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2109 | /// #[repr(u8)] |
| 2110 | /// enum C0 { xC0 = 0xC0 } |
| 2111 | /// |
| 2112 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2113 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2114 | /// #[repr(C)] |
| 2115 | /// struct C0C0(C0, C0); |
| 2116 | /// |
| 2117 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2118 | /// #[repr(C)] |
| 2119 | /// struct Packet { |
| 2120 | /// magic_number: C0C0, |
| 2121 | /// mug_size: u8, |
| 2122 | /// temperature: u8, |
| 2123 | /// marshmallows: [[u8; 2]], |
| 2124 | /// } |
| 2125 | /// |
| 2126 | /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..]; |
| 2127 | /// |
| 2128 | /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap(); |
| 2129 | /// |
| 2130 | /// assert_eq!(packet.mug_size, 240); |
| 2131 | /// assert_eq!(packet.temperature, 77); |
| 2132 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2133 | /// assert_eq!(suffix, &[8u8][..]); |
| 2134 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2136 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; |
| 2137 | /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err()); |
| 2138 | /// ``` |
| 2139 | /// |
| 2140 | /// Since an explicit `count` is provided, this method supports types with |
| 2141 | /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`] |
| 2142 | /// which do not take an explicit count do not support such types. |
| 2143 | /// |
| 2144 | /// ``` |
| 2145 | /// use core::num::NonZeroU16; |
| 2146 | /// use zerocopy::*; |
| 2147 | /// # use zerocopy_derive::*; |
| 2148 | /// |
| 2149 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 2150 | /// #[repr(C)] |
| 2151 | /// struct ZSTy { |
| 2152 | /// leading_sized: NonZeroU16, |
| 2153 | /// trailing_dst: [()], |
| 2154 | /// } |
| 2155 | /// |
| 2156 | /// let src = 0xCAFEu16.as_bytes(); |
| 2157 | /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap(); |
| 2158 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2159 | /// ``` |
| 2160 | /// |
| 2161 | /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix |
| 2162 | #[must_use = "has no side effects" ] |
| 2163 | #[inline ] |
| 2164 | fn try_ref_from_prefix_with_elems( |
| 2165 | source: &[u8], |
| 2166 | count: usize, |
| 2167 | ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>> |
| 2168 | where |
| 2169 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 2170 | { |
| 2171 | try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count)) |
| 2172 | } |
| 2173 | |
| 2174 | /// Attempts to interpret the suffix of the given `source` as a `&Self` with |
| 2175 | /// a DST length equal to `count`. |
| 2176 | /// |
| 2177 | /// This method attempts to return a reference to the suffix of `source` |
| 2178 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 2179 | /// to the preceding bytes. If the length of `source` is less than the size |
| 2180 | /// of `Self` with `count` elements, if the suffix of `source` is not |
| 2181 | /// appropriately aligned, or if the suffix of `source` does not contain a |
| 2182 | /// valid instance of `Self`, this returns `Err`. If [`Self: |
| 2183 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 2184 | /// error][ConvertError::from]. |
| 2185 | /// |
| 2186 | /// [self-unaligned]: Unaligned |
| 2187 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 2188 | /// |
| 2189 | /// # Examples |
| 2190 | /// |
| 2191 | /// ``` |
| 2192 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2193 | /// use zerocopy::TryFromBytes; |
| 2194 | /// # use zerocopy_derive::*; |
| 2195 | /// |
| 2196 | /// // The only valid value of this type is the byte `0xC0` |
| 2197 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2198 | /// #[repr(u8)] |
| 2199 | /// enum C0 { xC0 = 0xC0 } |
| 2200 | /// |
| 2201 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2202 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2203 | /// #[repr(C)] |
| 2204 | /// struct C0C0(C0, C0); |
| 2205 | /// |
| 2206 | /// #[derive(TryFromBytes, KnownLayout, Immutable)] |
| 2207 | /// #[repr(C)] |
| 2208 | /// struct Packet { |
| 2209 | /// magic_number: C0C0, |
| 2210 | /// mug_size: u8, |
| 2211 | /// temperature: u8, |
| 2212 | /// marshmallows: [[u8; 2]], |
| 2213 | /// } |
| 2214 | /// |
| 2215 | /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 2216 | /// |
| 2217 | /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap(); |
| 2218 | /// |
| 2219 | /// assert_eq!(packet.mug_size, 240); |
| 2220 | /// assert_eq!(packet.temperature, 77); |
| 2221 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2222 | /// assert_eq!(prefix, &[123u8][..]); |
| 2223 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2225 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; |
| 2226 | /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err()); |
| 2227 | /// ``` |
| 2228 | /// |
| 2229 | /// Since an explicit `count` is provided, this method supports types with |
| 2230 | /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`] |
| 2231 | /// which do not take an explicit count do not support such types. |
| 2232 | /// |
| 2233 | /// ``` |
| 2234 | /// use core::num::NonZeroU16; |
| 2235 | /// use zerocopy::*; |
| 2236 | /// # use zerocopy_derive::*; |
| 2237 | /// |
| 2238 | /// #[derive(TryFromBytes, Immutable, KnownLayout)] |
| 2239 | /// #[repr(C)] |
| 2240 | /// struct ZSTy { |
| 2241 | /// leading_sized: NonZeroU16, |
| 2242 | /// trailing_dst: [()], |
| 2243 | /// } |
| 2244 | /// |
| 2245 | /// let src = 0xCAFEu16.as_bytes(); |
| 2246 | /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap(); |
| 2247 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2248 | /// ``` |
| 2249 | /// |
| 2250 | /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix |
| 2251 | #[must_use = "has no side effects" ] |
| 2252 | #[inline ] |
| 2253 | fn try_ref_from_suffix_with_elems( |
| 2254 | source: &[u8], |
| 2255 | count: usize, |
| 2256 | ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>> |
| 2257 | where |
| 2258 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 2259 | { |
| 2260 | try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap) |
| 2261 | } |
| 2262 | |
| 2263 | /// Attempts to interpret the given `source` as a `&mut Self` with a DST |
| 2264 | /// length equal to `count`. |
| 2265 | /// |
| 2266 | /// This method attempts to return a reference to `source` interpreted as a |
| 2267 | /// `Self` with `count` trailing elements. If the length of `source` is not |
| 2268 | /// equal to the size of `Self` with `count` elements, if `source` is not |
| 2269 | /// appropriately aligned, or if `source` does not contain a valid instance |
| 2270 | /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], |
| 2271 | /// you can [infallibly discard the alignment error][ConvertError::from]. |
| 2272 | /// |
| 2273 | /// [self-unaligned]: Unaligned |
| 2274 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 2275 | /// |
| 2276 | /// # Examples |
| 2277 | /// |
| 2278 | /// ``` |
| 2279 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2280 | /// use zerocopy::TryFromBytes; |
| 2281 | /// # use zerocopy_derive::*; |
| 2282 | /// |
| 2283 | /// // The only valid value of this type is the byte `0xC0` |
| 2284 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2285 | /// #[repr(u8)] |
| 2286 | /// enum C0 { xC0 = 0xC0 } |
| 2287 | /// |
| 2288 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2289 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2290 | /// #[repr(C)] |
| 2291 | /// struct C0C0(C0, C0); |
| 2292 | /// |
| 2293 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2294 | /// #[repr(C)] |
| 2295 | /// struct Packet { |
| 2296 | /// magic_number: C0C0, |
| 2297 | /// mug_size: u8, |
| 2298 | /// temperature: u8, |
| 2299 | /// marshmallows: [[u8; 2]], |
| 2300 | /// } |
| 2301 | /// |
| 2302 | /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 2303 | /// |
| 2304 | /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap(); |
| 2305 | /// |
| 2306 | /// assert_eq!(packet.mug_size, 240); |
| 2307 | /// assert_eq!(packet.temperature, 77); |
| 2308 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2309 | /// |
| 2310 | /// packet.temperature = 111; |
| 2311 | /// |
| 2312 | /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]); |
| 2313 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2315 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..]; |
| 2316 | /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err()); |
| 2317 | /// ``` |
| 2318 | /// |
| 2319 | /// Since an explicit `count` is provided, this method supports types with |
| 2320 | /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`] |
| 2321 | /// which do not take an explicit count do not support such types. |
| 2322 | /// |
| 2323 | /// ``` |
| 2324 | /// use core::num::NonZeroU16; |
| 2325 | /// use zerocopy::*; |
| 2326 | /// # use zerocopy_derive::*; |
| 2327 | /// |
| 2328 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2329 | /// #[repr(C)] |
| 2330 | /// struct ZSTy { |
| 2331 | /// leading_sized: NonZeroU16, |
| 2332 | /// trailing_dst: [()], |
| 2333 | /// } |
| 2334 | /// |
| 2335 | /// let mut src = 0xCAFEu16; |
| 2336 | /// let src = src.as_mut_bytes(); |
| 2337 | /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap(); |
| 2338 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2339 | /// ``` |
| 2340 | /// |
| 2341 | /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes |
| 2342 | #[must_use = "has no side effects" ] |
| 2343 | #[inline ] |
| 2344 | fn try_mut_from_bytes_with_elems( |
| 2345 | source: &mut [u8], |
| 2346 | count: usize, |
| 2347 | ) -> Result<&mut Self, TryCastError<&mut [u8], Self>> |
| 2348 | where |
| 2349 | Self: KnownLayout<PointerMetadata = usize>, |
| 2350 | { |
| 2351 | match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count)) |
| 2352 | { |
| 2353 | Ok(source) => { |
| 2354 | // This call may panic. If that happens, it doesn't cause any soundness |
| 2355 | // issues, as we have not generated any invalid state which we need to |
| 2356 | // fix before returning. |
| 2357 | // |
| 2358 | // Note that one panic or post-monomorphization error condition is |
| 2359 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
| 2360 | // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic |
| 2361 | // condition will not happen. |
| 2362 | match source.try_into_valid() { |
| 2363 | Ok(source) => Ok(source.as_mut()), |
| 2364 | Err(e) => { |
| 2365 | Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()) |
| 2366 | } |
| 2367 | } |
| 2368 | } |
| 2369 | Err(e) => Err(e.map_src(Ptr::as_mut).into()), |
| 2370 | } |
| 2371 | } |
| 2372 | |
| 2373 | /// Attempts to interpret the prefix of the given `source` as a `&mut Self` |
| 2374 | /// with a DST length equal to `count`. |
| 2375 | /// |
| 2376 | /// This method attempts to return a reference to the prefix of `source` |
| 2377 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 2378 | /// to the remaining bytes. If the length of `source` is less than the size |
| 2379 | /// of `Self` with `count` elements, if `source` is not appropriately |
| 2380 | /// aligned, or if the prefix of `source` does not contain a valid instance |
| 2381 | /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], |
| 2382 | /// you can [infallibly discard the alignment error][ConvertError::from]. |
| 2383 | /// |
| 2384 | /// [self-unaligned]: Unaligned |
| 2385 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 2386 | /// |
| 2387 | /// # Examples |
| 2388 | /// |
| 2389 | /// ``` |
| 2390 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2391 | /// use zerocopy::TryFromBytes; |
| 2392 | /// # use zerocopy_derive::*; |
| 2393 | /// |
| 2394 | /// // The only valid value of this type is the byte `0xC0` |
| 2395 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2396 | /// #[repr(u8)] |
| 2397 | /// enum C0 { xC0 = 0xC0 } |
| 2398 | /// |
| 2399 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2400 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2401 | /// #[repr(C)] |
| 2402 | /// struct C0C0(C0, C0); |
| 2403 | /// |
| 2404 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2405 | /// #[repr(C)] |
| 2406 | /// struct Packet { |
| 2407 | /// magic_number: C0C0, |
| 2408 | /// mug_size: u8, |
| 2409 | /// temperature: u8, |
| 2410 | /// marshmallows: [[u8; 2]], |
| 2411 | /// } |
| 2412 | /// |
| 2413 | /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..]; |
| 2414 | /// |
| 2415 | /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap(); |
| 2416 | /// |
| 2417 | /// assert_eq!(packet.mug_size, 240); |
| 2418 | /// assert_eq!(packet.temperature, 77); |
| 2419 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2420 | /// assert_eq!(suffix, &[8u8][..]); |
| 2421 | /// |
| 2422 | /// packet.temperature = 111; |
| 2423 | /// suffix[0] = 222; |
| 2424 | /// |
| 2425 | /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]); |
| 2426 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2428 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; |
| 2429 | /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err()); |
| 2430 | /// ``` |
| 2431 | /// |
| 2432 | /// Since an explicit `count` is provided, this method supports types with |
| 2433 | /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`] |
| 2434 | /// which do not take an explicit count do not support such types. |
| 2435 | /// |
| 2436 | /// ``` |
| 2437 | /// use core::num::NonZeroU16; |
| 2438 | /// use zerocopy::*; |
| 2439 | /// # use zerocopy_derive::*; |
| 2440 | /// |
| 2441 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2442 | /// #[repr(C)] |
| 2443 | /// struct ZSTy { |
| 2444 | /// leading_sized: NonZeroU16, |
| 2445 | /// trailing_dst: [()], |
| 2446 | /// } |
| 2447 | /// |
| 2448 | /// let mut src = 0xCAFEu16; |
| 2449 | /// let src = src.as_mut_bytes(); |
| 2450 | /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap(); |
| 2451 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2452 | /// ``` |
| 2453 | /// |
| 2454 | /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix |
| 2455 | #[must_use = "has no side effects" ] |
| 2456 | #[inline ] |
| 2457 | fn try_mut_from_prefix_with_elems( |
| 2458 | source: &mut [u8], |
| 2459 | count: usize, |
| 2460 | ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>> |
| 2461 | where |
| 2462 | Self: KnownLayout<PointerMetadata = usize>, |
| 2463 | { |
| 2464 | try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count)) |
| 2465 | } |
| 2466 | |
| 2467 | /// Attempts to interpret the suffix of the given `source` as a `&mut Self` |
| 2468 | /// with a DST length equal to `count`. |
| 2469 | /// |
| 2470 | /// This method attempts to return a reference to the suffix of `source` |
| 2471 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 2472 | /// to the preceding bytes. If the length of `source` is less than the size |
| 2473 | /// of `Self` with `count` elements, if the suffix of `source` is not |
| 2474 | /// appropriately aligned, or if the suffix of `source` does not contain a |
| 2475 | /// valid instance of `Self`, this returns `Err`. If [`Self: |
| 2476 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 2477 | /// error][ConvertError::from]. |
| 2478 | /// |
| 2479 | /// [self-unaligned]: Unaligned |
| 2480 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 2481 | /// |
| 2482 | /// # Examples |
| 2483 | /// |
| 2484 | /// ``` |
| 2485 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2486 | /// use zerocopy::TryFromBytes; |
| 2487 | /// # use zerocopy_derive::*; |
| 2488 | /// |
| 2489 | /// // The only valid value of this type is the byte `0xC0` |
| 2490 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2491 | /// #[repr(u8)] |
| 2492 | /// enum C0 { xC0 = 0xC0 } |
| 2493 | /// |
| 2494 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2495 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2496 | /// #[repr(C)] |
| 2497 | /// struct C0C0(C0, C0); |
| 2498 | /// |
| 2499 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2500 | /// #[repr(C)] |
| 2501 | /// struct Packet { |
| 2502 | /// magic_number: C0C0, |
| 2503 | /// mug_size: u8, |
| 2504 | /// temperature: u8, |
| 2505 | /// marshmallows: [[u8; 2]], |
| 2506 | /// } |
| 2507 | /// |
| 2508 | /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..]; |
| 2509 | /// |
| 2510 | /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap(); |
| 2511 | /// |
| 2512 | /// assert_eq!(packet.mug_size, 240); |
| 2513 | /// assert_eq!(packet.temperature, 77); |
| 2514 | /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]); |
| 2515 | /// assert_eq!(prefix, &[123u8][..]); |
| 2516 | /// |
| 2517 | /// prefix[0] = 111; |
| 2518 | /// packet.temperature = 222; |
| 2519 | /// |
| 2520 | /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]); |
| 2521 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2523 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..]; |
| 2524 | /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err()); |
| 2525 | /// ``` |
| 2526 | /// |
| 2527 | /// Since an explicit `count` is provided, this method supports types with |
| 2528 | /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`] |
| 2529 | /// which do not take an explicit count do not support such types. |
| 2530 | /// |
| 2531 | /// ``` |
| 2532 | /// use core::num::NonZeroU16; |
| 2533 | /// use zerocopy::*; |
| 2534 | /// # use zerocopy_derive::*; |
| 2535 | /// |
| 2536 | /// #[derive(TryFromBytes, KnownLayout)] |
| 2537 | /// #[repr(C)] |
| 2538 | /// struct ZSTy { |
| 2539 | /// leading_sized: NonZeroU16, |
| 2540 | /// trailing_dst: [()], |
| 2541 | /// } |
| 2542 | /// |
| 2543 | /// let mut src = 0xCAFEu16; |
| 2544 | /// let src = src.as_mut_bytes(); |
| 2545 | /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap(); |
| 2546 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 2547 | /// ``` |
| 2548 | /// |
| 2549 | /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix |
| 2550 | #[must_use = "has no side effects" ] |
| 2551 | #[inline ] |
| 2552 | fn try_mut_from_suffix_with_elems( |
| 2553 | source: &mut [u8], |
| 2554 | count: usize, |
| 2555 | ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>> |
| 2556 | where |
| 2557 | Self: KnownLayout<PointerMetadata = usize>, |
| 2558 | { |
| 2559 | try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap) |
| 2560 | } |
| 2561 | |
| 2562 | /// Attempts to read the given `source` as a `Self`. |
| 2563 | /// |
| 2564 | /// If `source.len() != size_of::<Self>()` or the bytes are not a valid |
| 2565 | /// instance of `Self`, this returns `Err`. |
| 2566 | /// |
| 2567 | /// # Examples |
| 2568 | /// |
| 2569 | /// ``` |
| 2570 | /// use zerocopy::TryFromBytes; |
| 2571 | /// # use zerocopy_derive::*; |
| 2572 | /// |
| 2573 | /// // The only valid value of this type is the byte `0xC0` |
| 2574 | /// #[derive(TryFromBytes)] |
| 2575 | /// #[repr(u8)] |
| 2576 | /// enum C0 { xC0 = 0xC0 } |
| 2577 | /// |
| 2578 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2579 | /// #[derive(TryFromBytes)] |
| 2580 | /// #[repr(C)] |
| 2581 | /// struct C0C0(C0, C0); |
| 2582 | /// |
| 2583 | /// #[derive(TryFromBytes)] |
| 2584 | /// #[repr(C)] |
| 2585 | /// struct Packet { |
| 2586 | /// magic_number: C0C0, |
| 2587 | /// mug_size: u8, |
| 2588 | /// temperature: u8, |
| 2589 | /// } |
| 2590 | /// |
| 2591 | /// let bytes = &[0xC0, 0xC0, 240, 77][..]; |
| 2592 | /// |
| 2593 | /// let packet = Packet::try_read_from_bytes(bytes).unwrap(); |
| 2594 | /// |
| 2595 | /// assert_eq!(packet.mug_size, 240); |
| 2596 | /// assert_eq!(packet.temperature, 77); |
| 2597 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2599 | /// let bytes = &mut [0x10, 0xC0, 240, 77][..]; |
| 2600 | /// assert!(Packet::try_read_from_bytes(bytes).is_err()); |
| 2601 | /// ``` |
| 2602 | #[must_use = "has no side effects" ] |
| 2603 | #[inline ] |
| 2604 | fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>> |
| 2605 | where |
| 2606 | Self: Sized, |
| 2607 | { |
| 2608 | let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) { |
| 2609 | Ok(candidate) => candidate, |
| 2610 | Err(e) => { |
| 2611 | return Err(TryReadError::Size(e.with_dst())); |
| 2612 | } |
| 2613 | }; |
// SAFETY: `candidate` was copied from `source: &[u8]`, so all of
| 2615 | // its bytes are initialized. |
| 2616 | unsafe { try_read_from(source, candidate) } |
| 2617 | } |
| 2618 | |
| 2619 | /// Attempts to read a `Self` from the prefix of the given `source`. |
| 2620 | /// |
| 2621 | /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes |
| 2622 | /// of `source`, returning that `Self` and any remaining bytes. If |
| 2623 | /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance |
| 2624 | /// of `Self`, it returns `Err`. |
| 2625 | /// |
| 2626 | /// # Examples |
| 2627 | /// |
| 2628 | /// ``` |
| 2629 | /// use zerocopy::TryFromBytes; |
| 2630 | /// # use zerocopy_derive::*; |
| 2631 | /// |
| 2632 | /// // The only valid value of this type is the byte `0xC0` |
| 2633 | /// #[derive(TryFromBytes)] |
| 2634 | /// #[repr(u8)] |
| 2635 | /// enum C0 { xC0 = 0xC0 } |
| 2636 | /// |
| 2637 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2638 | /// #[derive(TryFromBytes)] |
| 2639 | /// #[repr(C)] |
| 2640 | /// struct C0C0(C0, C0); |
| 2641 | /// |
| 2642 | /// #[derive(TryFromBytes)] |
| 2643 | /// #[repr(C)] |
| 2644 | /// struct Packet { |
| 2645 | /// magic_number: C0C0, |
| 2646 | /// mug_size: u8, |
| 2647 | /// temperature: u8, |
| 2648 | /// } |
| 2649 | /// |
| 2650 | /// // These are more bytes than are needed to encode a `Packet`. |
| 2651 | /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 2652 | /// |
| 2653 | /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap(); |
| 2654 | /// |
| 2655 | /// assert_eq!(packet.mug_size, 240); |
| 2656 | /// assert_eq!(packet.temperature, 77); |
| 2657 | /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]); |
| 2658 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2660 | /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..]; |
| 2661 | /// assert!(Packet::try_read_from_prefix(bytes).is_err()); |
| 2662 | /// ``` |
| 2663 | #[must_use = "has no side effects" ] |
| 2664 | #[inline ] |
| 2665 | fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>> |
| 2666 | where |
| 2667 | Self: Sized, |
| 2668 | { |
| 2669 | let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) { |
| 2670 | Ok(candidate) => candidate, |
| 2671 | Err(e) => { |
| 2672 | return Err(TryReadError::Size(e.with_dst())); |
| 2673 | } |
| 2674 | }; |
// SAFETY: `candidate` was copied from `source: &[u8]`, so all of
| 2676 | // its bytes are initialized. |
| 2677 | unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) } |
| 2678 | } |
| 2679 | |
| 2680 | /// Attempts to read a `Self` from the suffix of the given `source`. |
| 2681 | /// |
| 2682 | /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes |
| 2683 | /// of `source`, returning that `Self` and any preceding bytes. If |
| 2684 | /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance |
| 2685 | /// of `Self`, it returns `Err`. |
| 2686 | /// |
| 2687 | /// # Examples |
| 2688 | /// |
| 2689 | /// ``` |
| 2690 | /// # #![allow (non_camel_case_types)] // For C0::xC0 |
| 2691 | /// use zerocopy::TryFromBytes; |
| 2692 | /// # use zerocopy_derive::*; |
| 2693 | /// |
| 2694 | /// // The only valid value of this type is the byte `0xC0` |
| 2695 | /// #[derive(TryFromBytes)] |
| 2696 | /// #[repr(u8)] |
| 2697 | /// enum C0 { xC0 = 0xC0 } |
| 2698 | /// |
| 2699 | /// // The only valid value of this type is the bytes `0xC0C0`. |
| 2700 | /// #[derive(TryFromBytes)] |
| 2701 | /// #[repr(C)] |
| 2702 | /// struct C0C0(C0, C0); |
| 2703 | /// |
| 2704 | /// #[derive(TryFromBytes)] |
| 2705 | /// #[repr(C)] |
| 2706 | /// struct Packet { |
| 2707 | /// magic_number: C0C0, |
| 2708 | /// mug_size: u8, |
| 2709 | /// temperature: u8, |
| 2710 | /// } |
| 2711 | /// |
| 2712 | /// // These are more bytes than are needed to encode a `Packet`. |
| 2713 | /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..]; |
| 2714 | /// |
| 2715 | /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap(); |
| 2716 | /// |
| 2717 | /// assert_eq!(packet.mug_size, 240); |
| 2718 | /// assert_eq!(packet.temperature, 77); |
| 2719 | /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]); |
| 2720 | /// |
/// // These bytes are not a valid instance of `Packet`.
| 2722 | /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..]; |
| 2723 | /// assert!(Packet::try_read_from_suffix(bytes).is_err()); |
| 2724 | /// ``` |
| 2725 | #[must_use = "has no side effects" ] |
| 2726 | #[inline ] |
| 2727 | fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>> |
| 2728 | where |
| 2729 | Self: Sized, |
| 2730 | { |
| 2731 | let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) { |
| 2732 | Ok(candidate) => candidate, |
| 2733 | Err(e) => { |
| 2734 | return Err(TryReadError::Size(e.with_dst())); |
| 2735 | } |
| 2736 | }; |
// SAFETY: `candidate` was copied from `source: &[u8]`, so all of
| 2738 | // its bytes are initialized. |
| 2739 | unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) } |
| 2740 | } |
| 2741 | } |
| 2742 | |
| 2743 | #[inline (always)] |
| 2744 | fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>( |
| 2745 | source: &[u8], |
| 2746 | cast_type: CastType, |
| 2747 | meta: Option<T::PointerMetadata>, |
| 2748 | ) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> { |
match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
Ok((source, prefix_suffix)) => {
| 2751 | // This call may panic. If that happens, it doesn't cause any soundness |
| 2752 | // issues, as we have not generated any invalid state which we need to |
| 2753 | // fix before returning. |
| 2754 | // |
| 2755 | // Note that one panic or post-monomorphization error condition is |
| 2756 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
| 2757 | // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic |
| 2758 | // condition will not happen. |
| 2759 | match source.try_into_valid() { |
Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
| 2762 | } |
| 2763 | } |
Err(e) => Err(e.map_src(Ptr::as_ref).into()),
| 2765 | } |
| 2766 | } |
| 2767 | |
| 2768 | #[inline (always)] |
| 2769 | fn try_mut_from_prefix_suffix<T: TryFromBytes + KnownLayout + ?Sized>( |
| 2770 | candidate: &mut [u8], |
| 2771 | cast_type: CastType, |
| 2772 | meta: Option<T::PointerMetadata>, |
| 2773 | ) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> { |
match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
Ok((candidate, prefix_suffix)) => {
| 2776 | // This call may panic. If that happens, it doesn't cause any soundness |
| 2777 | // issues, as we have not generated any invalid state which we need to |
| 2778 | // fix before returning. |
| 2779 | // |
| 2780 | // Note that one panic or post-monomorphization error condition is |
| 2781 | // calling `try_into_valid` (and thus `is_bit_valid`) with a shared |
// pointer when `Self: !Immutable`. Since `candidate` is an exclusive
// pointer here, this panic condition will not happen.
| 2784 | match candidate.try_into_valid() { |
Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
| 2787 | } |
| 2788 | } |
Err(e) => Err(e.map_src(Ptr::as_mut).into()),
| 2790 | } |
| 2791 | } |
| 2792 | |
| 2793 | #[inline (always)] |
fn swap<T, U>((t, u): (T, U)) -> (U, T) {
| 2795 | (u, t) |
| 2796 | } |
| 2797 | |
| 2798 | /// # Safety |
| 2799 | /// |
| 2800 | /// All bytes of `candidate` must be initialized. |
| 2801 | #[inline (always)] |
| 2802 | unsafe fn try_read_from<S, T: TryFromBytes>( |
| 2803 | source: S, |
| 2804 | mut candidate: CoreMaybeUninit<T>, |
| 2805 | ) -> Result<T, TryReadError<S, T>> { |
| 2806 | // We use `from_mut` despite not mutating via `c_ptr` so that we don't need |
| 2807 | // to add a `T: Immutable` bound. |
let c_ptr = Ptr::from_mut(&mut candidate);
let c_ptr = c_ptr.transparent_wrapper_into_inner();
// SAFETY: `c_ptr` has no uninitialized sub-ranges because it is derived from
// `candidate`, which the caller promises is entirely initialized.
let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
| 2813 | |
| 2814 | // This call may panic. If that happens, it doesn't cause any soundness |
| 2815 | // issues, as we have not generated any invalid state which we need to |
| 2816 | // fix before returning. |
| 2817 | // |
// Note that one panic or post-monomorphization error condition is
// calling `is_bit_valid` with a shared pointer when `Self: !Immutable`.
// Since `c_ptr` is an exclusive pointer, this panic condition will not
// happen.
if !T::is_bit_valid(c_ptr.forget_aligned()) {
return Err(ValidityError::new(source).into());
| 2824 | } |
| 2825 | |
| 2826 | // SAFETY: We just validated that `candidate` contains a valid `T`. |
| 2827 | Ok(unsafe { candidate.assume_init() }) |
| 2828 | } |
| 2829 | |
| 2830 | /// Types for which a sequence of bytes all set to zero represents a valid |
| 2831 | /// instance of the type. |
| 2832 | /// |
| 2833 | /// Any memory region of the appropriate length which is guaranteed to contain |
| 2834 | /// only zero bytes can be viewed as any `FromZeros` type with no runtime |
| 2835 | /// overhead. This is useful whenever memory is known to be in a zeroed state, |
/// such as memory returned from some allocation routines.
| 2837 | /// |
| 2838 | /// # Warning: Padding bytes |
| 2839 | /// |
| 2840 | /// Note that, when a value is moved or copied, only the non-padding bytes of |
| 2841 | /// that value are guaranteed to be preserved. It is unsound to assume that |
| 2842 | /// values written to padding bytes are preserved after a move or copy. For more |
| 2843 | /// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes]. |
| 2844 | /// |
| 2845 | /// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes |
| 2846 | /// |
| 2847 | /// # Implementation |
| 2848 | /// |
| 2849 | /// **Do not implement this trait yourself!** Instead, use |
| 2850 | /// [`#[derive(FromZeros)]`][derive]; e.g.: |
| 2851 | /// |
| 2852 | /// ``` |
| 2853 | /// # use zerocopy_derive::{FromZeros, Immutable}; |
| 2854 | /// #[derive(FromZeros)] |
| 2855 | /// struct MyStruct { |
| 2856 | /// # /* |
| 2857 | /// ... |
| 2858 | /// # */ |
| 2859 | /// } |
| 2860 | /// |
| 2861 | /// #[derive(FromZeros)] |
| 2862 | /// #[repr(u8)] |
| 2863 | /// enum MyEnum { |
| 2864 | /// # Variant0, |
| 2865 | /// # /* |
| 2866 | /// ... |
| 2867 | /// # */ |
| 2868 | /// } |
| 2869 | /// |
| 2870 | /// #[derive(FromZeros, Immutable)] |
| 2871 | /// union MyUnion { |
| 2872 | /// # variant: u8, |
| 2873 | /// # /* |
| 2874 | /// ... |
| 2875 | /// # */ |
| 2876 | /// } |
| 2877 | /// ``` |
| 2878 | /// |
| 2879 | /// This derive performs a sophisticated, compile-time safety analysis to |
| 2880 | /// determine whether a type is `FromZeros`. |
| 2881 | /// |
| 2882 | /// # Safety |
| 2883 | /// |
| 2884 | /// *This section describes what is required in order for `T: FromZeros`, and |
| 2885 | /// what unsafe code may assume of such types. If you don't plan on implementing |
| 2886 | /// `FromZeros` manually, and you don't plan on writing unsafe code that |
| 2887 | /// operates on `FromZeros` types, then you don't need to read this section.* |
| 2888 | /// |
| 2889 | /// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a |
| 2890 | /// `T` whose bytes are all initialized to zero. If a type is marked as |
| 2891 | /// `FromZeros` which violates this contract, it may cause undefined behavior. |
| 2892 | /// |
| 2893 | /// `#[derive(FromZeros)]` only permits [types which satisfy these |
| 2894 | /// requirements][derive-analysis]. |
| 2895 | /// |
| 2896 | #[cfg_attr ( |
| 2897 | feature = "derive" , |
| 2898 | doc = "[derive]: zerocopy_derive::FromZeros" , |
| 2899 | doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis" |
| 2900 | )] |
| 2901 | #[cfg_attr ( |
| 2902 | not(feature = "derive" ), |
| 2903 | doc = concat!("[derive]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.FromZeros.html" ), |
| 2904 | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.FromZeros.html#analysis" ), |
| 2905 | )] |
| 2906 | #[cfg_attr ( |
| 2907 | zerocopy_diagnostic_on_unimplemented_1_78_0, |
| 2908 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`" ) |
| 2909 | )] |
| 2910 | pub unsafe trait FromZeros: TryFromBytes { |
| 2911 | // The `Self: Sized` bound makes it so that `FromZeros` is still object |
| 2912 | // safe. |
| 2913 | #[doc (hidden)] |
| 2914 | fn only_derive_is_allowed_to_implement_this_trait() |
| 2915 | where |
| 2916 | Self: Sized; |
| 2917 | |
| 2918 | /// Overwrites `self` with zeros. |
| 2919 | /// |
| 2920 | /// Sets every byte in `self` to 0. While this is similar to doing `*self = |
| 2921 | /// Self::new_zeroed()`, it differs in that `zero` does not semantically |
| 2922 | /// drop the current value and replace it with a new one — it simply |
| 2923 | /// modifies the bytes of the existing value. |
| 2924 | /// |
| 2925 | /// # Examples |
| 2926 | /// |
| 2927 | /// ``` |
| 2928 | /// # use zerocopy::FromZeros; |
| 2929 | /// # use zerocopy_derive::*; |
| 2930 | /// # |
| 2931 | /// #[derive(FromZeros)] |
| 2932 | /// #[repr(C)] |
| 2933 | /// struct PacketHeader { |
| 2934 | /// src_port: [u8; 2], |
| 2935 | /// dst_port: [u8; 2], |
| 2936 | /// length: [u8; 2], |
| 2937 | /// checksum: [u8; 2], |
| 2938 | /// } |
| 2939 | /// |
| 2940 | /// let mut header = PacketHeader { |
| 2941 | /// src_port: 100u16.to_be_bytes(), |
| 2942 | /// dst_port: 200u16.to_be_bytes(), |
| 2943 | /// length: 300u16.to_be_bytes(), |
| 2944 | /// checksum: 400u16.to_be_bytes(), |
| 2945 | /// }; |
| 2946 | /// |
| 2947 | /// header.zero(); |
| 2948 | /// |
| 2949 | /// assert_eq!(header.src_port, [0, 0]); |
| 2950 | /// assert_eq!(header.dst_port, [0, 0]); |
| 2951 | /// assert_eq!(header.length, [0, 0]); |
| 2952 | /// assert_eq!(header.checksum, [0, 0]); |
| 2953 | /// ``` |
| 2954 | #[inline (always)] |
| 2955 | fn zero(&mut self) { |
| 2956 | let slf: *mut Self = self; |
| 2957 | let len = mem::size_of_val(self); |
| 2958 | // SAFETY: |
| 2959 | // - `self` is guaranteed by the type system to be valid for writes of |
| 2960 | // size `size_of_val(self)`. |
| 2961 | // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned |
| 2962 | // as required by `u8`. |
| 2963 | // - Since `Self: FromZeros`, the all-zeros instance is a valid instance |
// of `Self`.
| 2965 | // |
| 2966 | // TODO(#429): Add references to docs and quotes. |
| 2967 | unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) }; |
| 2968 | } |
| 2969 | |
| 2970 | /// Creates an instance of `Self` from zeroed bytes. |
| 2971 | /// |
| 2972 | /// # Examples |
| 2973 | /// |
| 2974 | /// ``` |
| 2975 | /// # use zerocopy::FromZeros; |
| 2976 | /// # use zerocopy_derive::*; |
| 2977 | /// # |
| 2978 | /// #[derive(FromZeros)] |
| 2979 | /// #[repr(C)] |
| 2980 | /// struct PacketHeader { |
| 2981 | /// src_port: [u8; 2], |
| 2982 | /// dst_port: [u8; 2], |
| 2983 | /// length: [u8; 2], |
| 2984 | /// checksum: [u8; 2], |
| 2985 | /// } |
| 2986 | /// |
| 2987 | /// let header: PacketHeader = FromZeros::new_zeroed(); |
| 2988 | /// |
| 2989 | /// assert_eq!(header.src_port, [0, 0]); |
| 2990 | /// assert_eq!(header.dst_port, [0, 0]); |
| 2991 | /// assert_eq!(header.length, [0, 0]); |
| 2992 | /// assert_eq!(header.checksum, [0, 0]); |
| 2993 | /// ``` |
| 2994 | #[must_use = "has no side effects" ] |
| 2995 | #[inline (always)] |
| 2996 | fn new_zeroed() -> Self |
| 2997 | where |
| 2998 | Self: Sized, |
| 2999 | { |
| 3000 | // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal. |
| 3001 | unsafe { mem::zeroed() } |
| 3002 | } |
| 3003 | |
| 3004 | /// Creates a `Box<Self>` from zeroed bytes. |
| 3005 | /// |
| 3006 | /// This function is useful for allocating large values on the heap and |
| 3007 | /// zero-initializing them, without ever creating a temporary instance of |
| 3008 | /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()` |
| 3009 | /// will allocate `[u8; 1048576]` directly on the heap; it does not require |
| 3010 | /// storing `[u8; 1048576]` in a temporary variable on the stack. |
| 3011 | /// |
| 3012 | /// On systems that use a heap implementation that supports allocating from |
| 3013 | /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may |
| 3014 | /// have performance benefits. |
| 3015 | /// |
| 3016 | /// # Errors |
| 3017 | /// |
| 3018 | /// Returns an error on allocation failure. Allocation failure is guaranteed |
| 3019 | /// never to cause a panic or an abort. |
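///
/// # Examples
///
/// A small example sketch (it assumes the `alloc` feature is enabled):
///
/// ```
/// use zerocopy::FromZeros;
///
/// // Allocate 1 MiB of zeroed memory directly on the heap; no
/// // `[u8; 1048576]` is ever materialized on the stack.
/// let buf = <[u8; 1048576]>::new_box_zeroed().unwrap();
/// assert!(buf.iter().all(|&b| b == 0));
/// ```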
| 3020 | #[must_use = "has no side effects (other than allocation)" ] |
| 3021 | #[cfg (any(feature = "alloc" , test))] |
| 3022 | #[cfg_attr (doc_cfg, doc(cfg(feature = "alloc" )))] |
| 3023 | #[inline ] |
| 3024 | fn new_box_zeroed() -> Result<Box<Self>, AllocError> |
| 3025 | where |
| 3026 | Self: Sized, |
| 3027 | { |
// If `Self` is a ZST, then return a proper boxed instance of it. There is
| 3029 | // no allocation, but `Box` does require a correct dangling pointer. |
| 3030 | let layout = Layout::new::<Self>(); |
| 3031 | if layout.size() == 0 { |
| 3032 | // Construct the `Box` from a dangling pointer to avoid calling |
| 3033 | // `Self::new_zeroed`. This ensures that stack space is never |
| 3034 | // allocated for `Self` even on lower opt-levels where this branch |
| 3035 | // might not get optimized out. |
| 3036 | |
| 3037 | // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity |
| 3038 | // requirements are that the pointer is non-null and sufficiently |
| 3039 | // aligned. Per [2], `NonNull::dangling` produces a pointer which |
| 3040 | // is sufficiently aligned. Since the produced pointer is a |
| 3041 | // `NonNull`, it is non-null. |
| 3042 | // |
| 3043 | // [1] Per https://doc.rust-lang.org/nightly/std/boxed/index.html#memory-layout: |
| 3044 | // |
| 3045 | // For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned. |
| 3046 | // |
| 3047 | // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling: |
| 3048 | // |
| 3049 | // Creates a new `NonNull` that is dangling, but well-aligned. |
| 3050 | return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) }); |
| 3051 | } |
| 3052 | |
| 3053 | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
| 3054 | #[allow (clippy::undocumented_unsafe_blocks)] |
| 3055 | let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() }; |
| 3056 | if ptr.is_null() { |
| 3057 | return Err(AllocError); |
| 3058 | } |
| 3059 | // TODO(#429): Add a "SAFETY" comment and remove this `allow`. |
| 3060 | #[allow (clippy::undocumented_unsafe_blocks)] |
| 3061 | Ok(unsafe { Box::from_raw(ptr) }) |
| 3062 | } |
| 3063 | |
/// Creates a `Box<Self>` from zeroed bytes, with a DST length equal to
/// `count`.
///
/// This function is useful for allocating large values of `Self` on the
/// heap and zero-initializing them, without ever creating a temporary
/// instance of `Self` on the stack. For example,
/// `<[u8]>::new_box_zeroed_with_elems(1048576)` will allocate the slice
/// directly on the heap; it does not require storing the slice on the
/// stack.
///
/// On systems that use a heap implementation that supports allocating from
/// pre-zeroed memory, using `new_box_zeroed_with_elems` may have
/// performance benefits.
///
/// If the trailing slice element of `Self` is a zero-sized type, then this
/// function will return a `Box<Self>` with the correct trailing slice
/// `len`. Such a box cannot contain any actual information, but its `len()`
/// will report the correct value.
| 3080 | /// |
| 3081 | /// # Errors |
| 3082 | /// |
| 3083 | /// Returns an error on allocation failure. Allocation failure is |
| 3084 | /// guaranteed never to cause a panic or an abort. |
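///
/// # Examples
///
/// A small example sketch (it assumes the `alloc` feature is enabled):
///
/// ```
/// use zerocopy::FromZeros;
///
/// // Allocate a zeroed `Box<[u8]>` with 1024 elements directly on the heap.
/// let bytes = <[u8]>::new_box_zeroed_with_elems(1024).unwrap();
/// assert_eq!(bytes.len(), 1024);
/// assert!(bytes.iter().all(|&b| b == 0));
/// ```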
| 3085 | #[must_use = "has no side effects (other than allocation)" ] |
| 3086 | #[cfg (feature = "alloc" )] |
| 3087 | #[cfg_attr (doc_cfg, doc(cfg(feature = "alloc" )))] |
| 3088 | #[inline ] |
| 3089 | fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError> |
| 3090 | where |
| 3091 | Self: KnownLayout<PointerMetadata = usize>, |
| 3092 | { |
| 3093 | // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of |
| 3094 | // `new_box`. The referent of the pointer returned by `alloc_zeroed` |
| 3095 | // (and, consequently, the `Box` derived from it) is a valid instance of |
| 3096 | // `Self`, because `Self` is `FromZeros`. |
| 3097 | unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) } |
| 3098 | } |
| 3099 | |
| 3100 | #[deprecated (since = "0.8.0" , note = "renamed to `FromZeros::new_box_zeroed_with_elems`" )] |
| 3101 | #[doc (hidden)] |
| 3102 | #[cfg (feature = "alloc" )] |
| 3103 | #[cfg_attr (doc_cfg, doc(cfg(feature = "alloc" )))] |
| 3104 | #[must_use = "has no side effects (other than allocation)" ] |
| 3105 | #[inline (always)] |
| 3106 | fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError> |
| 3107 | where |
| 3108 | Self: Sized, |
| 3109 | { |
| 3110 | <[Self]>::new_box_zeroed_with_elems(len) |
| 3111 | } |
| 3112 | |
| 3113 | /// Creates a `Vec<Self>` from zeroed bytes. |
| 3114 | /// |
/// This function is useful for allocating large `Vec`s and
| 3116 | /// zero-initializing them, without ever creating a temporary instance of |
| 3117 | /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For |
| 3118 | /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the |
| 3119 | /// heap; it does not require storing intermediate values on the stack. |
| 3120 | /// |
| 3121 | /// On systems that use a heap implementation that supports allocating from |
| 3122 | /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits. |
| 3123 | /// |
| 3124 | /// If `Self` is a zero-sized type, then this function will return a |
| 3125 | /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any |
| 3126 | /// actual information, but its `len()` property will report the correct |
| 3127 | /// value. |
| 3128 | /// |
| 3129 | /// # Errors |
| 3130 | /// |
| 3131 | /// Returns an error on allocation failure. Allocation failure is |
| 3132 | /// guaranteed never to cause a panic or an abort. |
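///
/// # Examples
///
/// A small example sketch (it assumes the `alloc` feature is enabled):
///
/// ```
/// use zerocopy::FromZeros;
///
/// // Allocate a zero-filled `Vec<u8>` of length 4.
/// let v = u8::new_vec_zeroed(4).unwrap();
/// assert_eq!(v, [0, 0, 0, 0]);
/// ```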
| 3133 | #[must_use = "has no side effects (other than allocation)" ] |
| 3134 | #[cfg (feature = "alloc" )] |
| 3135 | #[cfg_attr (doc_cfg, doc(cfg(feature = "alloc" )))] |
| 3136 | #[inline (always)] |
| 3137 | fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError> |
| 3138 | where |
| 3139 | Self: Sized, |
| 3140 | { |
| 3141 | <[Self]>::new_box_zeroed_with_elems(len).map(Into::into) |
| 3142 | } |
| 3143 | |
| 3144 | /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of |
| 3145 | /// the vector. The new items are initialized with zeros. |
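///
/// # Examples
///
/// A small example sketch (it assumes the `alloc` feature is enabled and a
/// toolchain recent enough that this method is available):
///
/// ```
/// use zerocopy::FromZeros;
///
/// let mut v = vec![1u8, 2];
/// // Push three zeroed elements onto the end of `v`.
/// u8::extend_vec_zeroed(&mut v, 3).unwrap();
/// assert_eq!(v, [1, 2, 0, 0, 0]);
/// ```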
| 3146 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 3147 | #[cfg (feature = "alloc" )] |
| 3148 | #[cfg_attr (doc_cfg, doc(cfg(all(rust = "1.57.0" , feature = "alloc" ))))] |
| 3149 | #[inline (always)] |
| 3150 | fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError> |
| 3151 | where |
| 3152 | Self: Sized, |
| 3153 | { |
| 3154 | // PANICS: We pass `v.len()` for `position`, so the `position > v.len()` |
| 3155 | // panic condition is not satisfied. |
| 3156 | <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional) |
| 3157 | } |
| 3158 | |
| 3159 | /// Inserts `additional` new items into `Vec<Self>` at `position`. The new |
| 3160 | /// items are initialized with zeros. |
| 3161 | /// |
| 3162 | /// # Panics |
| 3163 | /// |
| 3164 | /// Panics if `position > v.len()`. |
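///
/// # Examples
///
/// A small example sketch (under the same feature assumptions as
/// `extend_vec_zeroed`):
///
/// ```
/// use zerocopy::FromZeros;
///
/// let mut v = vec![1u8, 2];
/// // Insert two zeroed elements at index 1.
/// u8::insert_vec_zeroed(&mut v, 1, 2).unwrap();
/// assert_eq!(v, [1, 0, 0, 2]);
/// ```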
| 3165 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 3166 | #[cfg (feature = "alloc" )] |
| 3167 | #[cfg_attr (doc_cfg, doc(cfg(all(rust = "1.57.0" , feature = "alloc" ))))] |
| 3168 | #[inline ] |
| 3169 | fn insert_vec_zeroed( |
| 3170 | v: &mut Vec<Self>, |
| 3171 | position: usize, |
| 3172 | additional: usize, |
| 3173 | ) -> Result<(), AllocError> |
| 3174 | where |
| 3175 | Self: Sized, |
| 3176 | { |
| 3177 | assert!(position <= v.len()); |
| 3178 | // We only conditionally compile on versions on which `try_reserve` is |
| 3179 | // stable; the Clippy lint is a false positive. |
| 3180 | #[allow (clippy::incompatible_msrv)] |
| 3181 | v.try_reserve(additional).map_err(|_| AllocError)?; |
| 3182 | // SAFETY: The `try_reserve` call guarantees that these cannot overflow: |
| 3183 | // * `ptr.add(position)` |
| 3184 | // * `position + additional` |
| 3185 | // * `v.len() + additional` |
| 3186 | // |
| 3187 | // `v.len() - position` cannot overflow because we asserted that |
| 3188 | // `position <= v.len()`. |
| 3189 | unsafe { |
| 3190 | // This is a potentially overlapping copy. |
| 3191 | let ptr = v.as_mut_ptr(); |
| 3192 | #[allow (clippy::arithmetic_side_effects)] |
| 3193 | ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position); |
| 3194 | ptr.add(position).write_bytes(0, additional); |
| 3195 | #[allow (clippy::arithmetic_side_effects)] |
| 3196 | v.set_len(v.len() + additional); |
| 3197 | } |
| 3198 | |
| 3199 | Ok(()) |
| 3200 | } |
| 3201 | } |
| 3202 | |
| 3203 | /// Analyzes whether a type is [`FromBytes`]. |
| 3204 | /// |
| 3205 | /// This derive analyzes, at compile time, whether the annotated type satisfies |
| 3206 | /// the [safety conditions] of `FromBytes` and implements `FromBytes` and its |
| 3207 | /// supertraits if it is sound to do so. This derive can be applied to structs, |
| 3208 | /// enums, and unions; |
| 3209 | /// e.g.: |
| 3210 | /// |
| 3211 | /// ``` |
| 3212 | /// # use zerocopy_derive::{FromBytes, FromZeros, Immutable}; |
| 3213 | /// #[derive(FromBytes)] |
| 3214 | /// struct MyStruct { |
| 3215 | /// # /* |
| 3216 | /// ... |
| 3217 | /// # */ |
| 3218 | /// } |
| 3219 | /// |
| 3220 | /// #[derive(FromBytes)] |
| 3221 | /// #[repr(u8)] |
| 3222 | /// enum MyEnum { |
| 3223 | /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, |
| 3224 | /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, |
| 3225 | /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, |
| 3226 | /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, |
| 3227 | /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, |
| 3228 | /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, |
| 3229 | /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, |
| 3230 | /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, |
| 3231 | /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, |
| 3232 | /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, |
| 3233 | /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, |
| 3234 | /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, |
| 3235 | /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, |
| 3236 | /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, |
| 3237 | /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, |
| 3238 | /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, |
| 3239 | /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, |
| 3240 | /// # VFF, |
| 3241 | /// # /* |
| 3242 | /// ... |
| 3243 | /// # */ |
| 3244 | /// } |
| 3245 | /// |
| 3246 | /// #[derive(FromBytes, Immutable)] |
| 3247 | /// union MyUnion { |
| 3248 | /// # variant: u8, |
| 3249 | /// # /* |
| 3250 | /// ... |
| 3251 | /// # */ |
| 3252 | /// } |
| 3253 | /// ``` |
| 3254 | /// |
| 3255 | /// [safety conditions]: trait@FromBytes#safety |
| 3256 | /// |
| 3257 | /// # Analysis |
| 3258 | /// |
| 3259 | /// *This section describes, roughly, the analysis performed by this derive to |
| 3260 | /// determine whether it is sound to implement `FromBytes` for a given type. |
| 3261 | /// Unless you are modifying the implementation of this derive, or attempting to |
| 3262 | /// manually implement `FromBytes` for a type yourself, you don't need to read |
| 3263 | /// this section.* |
| 3264 | /// |
| 3265 | /// If a type has the following properties, then this derive can implement |
| 3266 | /// `FromBytes` for that type: |
| 3267 | /// |
| 3268 | /// - If the type is a struct, all of its fields must be `FromBytes`. |
| 3269 | /// - If the type is an enum: |
| 3270 | /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, |
| 3271 | /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). |
| 3272 | /// - The maximum number of discriminants must be used (so that every possible |
| 3273 | /// bit pattern is a valid one). Be very careful when using the `C`, |
| 3274 | /// `usize`, or `isize` representations, as their size is |
| 3275 | /// platform-dependent. |
| 3276 | /// - Its fields must be `FromBytes`. |
| 3277 | /// |
| 3278 | /// This analysis is subject to change. Unsafe code may *only* rely on the |
| 3279 | /// documented [safety conditions] of `FromBytes`, and must *not* rely on the |
| 3280 | /// implementation details of this derive. |
| 3281 | /// |
| 3282 | /// ## Why isn't an explicit representation required for structs? |
| 3283 | /// |
| 3284 | /// Neither this derive, nor the [safety conditions] of `FromBytes`, requires |
| 3285 | /// that structs are marked with `#[repr(C)]`. |
| 3286 | /// |
/// Per the [Rust reference][reference],
| 3288 | /// |
| 3289 | /// > The representation of a type can change the padding between fields, but |
| 3290 | /// > does not change the layout of the fields themselves. |
| 3291 | /// |
| 3292 | /// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations |
| 3293 | /// |
| 3294 | /// Since the layout of structs only consists of padding bytes and field bytes, |
| 3295 | /// a struct is soundly `FromBytes` if: |
| 3296 | /// 1. its padding is soundly `FromBytes`, and |
| 3297 | /// 2. its fields are soundly `FromBytes`. |
| 3298 | /// |
/// The first condition is always satisfied: padding bytes do not have
| 3300 | /// any validity constraints. A [discussion] of this question in the Unsafe Code |
| 3301 | /// Guidelines Working Group concluded that it would be virtually unimaginable |
| 3302 | /// for future versions of rustc to add validity constraints to padding bytes. |
| 3303 | /// |
| 3304 | /// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 |
| 3305 | /// |
| 3306 | /// Whether a struct is soundly `FromBytes` therefore solely depends on whether |
| 3307 | /// its fields are `FromBytes`. |
| 3308 | // TODO(#146): Document why we don't require an enum to have an explicit `repr` |
| 3309 | // attribute. |
| 3310 | #[cfg (any(feature = "derive" , test))] |
| 3311 | #[cfg_attr (doc_cfg, doc(cfg(feature = "derive" )))] |
| 3312 | pub use zerocopy_derive::FromBytes; |
| 3313 | |
| 3314 | /// Types for which any bit pattern is valid. |
| 3315 | /// |
| 3316 | /// Any memory region of the appropriate length which contains initialized bytes |
| 3317 | /// can be viewed as any `FromBytes` type with no runtime overhead. This is |
| 3318 | /// useful for efficiently parsing bytes as structured data. |
| 3319 | /// |
| 3320 | /// # Warning: Padding bytes |
| 3321 | /// |
| 3322 | /// Note that, when a value is moved or copied, only the non-padding bytes of |
| 3323 | /// that value are guaranteed to be preserved. It is unsound to assume that |
| 3324 | /// values written to padding bytes are preserved after a move or copy. For |
| 3325 | /// example, the following is unsound: |
| 3326 | /// |
| 3327 | /// ```rust,no_run |
| 3328 | /// use core::mem::{size_of, transmute}; |
| 3329 | /// use zerocopy::FromZeros; |
| 3330 | /// # use zerocopy_derive::*; |
| 3331 | /// |
| 3332 | /// // Assume `Foo` is a type with padding bytes. |
| 3333 | /// #[derive(FromZeros, Default)] |
| 3334 | /// struct Foo { |
| 3335 | /// # /* |
| 3336 | /// ... |
| 3337 | /// # */ |
| 3338 | /// } |
| 3339 | /// |
| 3340 | /// let mut foo: Foo = Foo::default(); |
| 3341 | /// FromZeros::zero(&mut foo); |
| 3342 | /// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`, |
| 3343 | /// // those writes are not guaranteed to be preserved in padding bytes when |
| 3344 | /// // `foo` is moved, so this may expose padding bytes as `u8`s. |
| 3345 | /// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) }; |
| 3346 | /// ``` |
| 3347 | /// |
| 3348 | /// # Implementation |
| 3349 | /// |
| 3350 | /// **Do not implement this trait yourself!** Instead, use |
| 3351 | /// [`#[derive(FromBytes)]`][derive]; e.g.: |
| 3352 | /// |
| 3353 | /// ``` |
| 3354 | /// # use zerocopy_derive::{FromBytes, Immutable}; |
| 3355 | /// #[derive(FromBytes)] |
| 3356 | /// struct MyStruct { |
| 3357 | /// # /* |
| 3358 | /// ... |
| 3359 | /// # */ |
| 3360 | /// } |
| 3361 | /// |
| 3362 | /// #[derive(FromBytes)] |
| 3363 | /// #[repr(u8)] |
| 3364 | /// enum MyEnum { |
| 3365 | /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, |
| 3366 | /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, |
| 3367 | /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, |
| 3368 | /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, |
| 3369 | /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, |
| 3370 | /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, |
| 3371 | /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, |
| 3372 | /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, |
| 3373 | /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, |
| 3374 | /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, |
| 3375 | /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, |
| 3376 | /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, |
| 3377 | /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, |
| 3378 | /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, |
| 3379 | /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, |
| 3380 | /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, |
| 3381 | /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, |
| 3382 | /// # VFF, |
| 3383 | /// # /* |
| 3384 | /// ... |
| 3385 | /// # */ |
| 3386 | /// } |
| 3387 | /// |
| 3388 | /// #[derive(FromBytes, Immutable)] |
| 3389 | /// union MyUnion { |
| 3390 | /// # variant: u8, |
| 3391 | /// # /* |
| 3392 | /// ... |
| 3393 | /// # */ |
| 3394 | /// } |
| 3395 | /// ``` |
| 3396 | /// |
| 3397 | /// This derive performs a sophisticated, compile-time safety analysis to |
| 3398 | /// determine whether a type is `FromBytes`. |
| 3399 | /// |
| 3400 | /// # Safety |
| 3401 | /// |
| 3402 | /// *This section describes what is required in order for `T: FromBytes`, and |
| 3403 | /// what unsafe code may assume of such types. If you don't plan on implementing |
| 3404 | /// `FromBytes` manually, and you don't plan on writing unsafe code that |
| 3405 | /// operates on `FromBytes` types, then you don't need to read this section.* |
| 3406 | /// |
| 3407 | /// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a |
| 3408 | /// `T` whose bytes are initialized to any sequence of valid `u8`s (in other |
| 3409 | /// words, any byte value which is not uninitialized). If a type is marked as |
| 3410 | /// `FromBytes` which violates this contract, it may cause undefined behavior. |
| 3411 | /// |
| 3412 | /// `#[derive(FromBytes)]` only permits [types which satisfy these |
| 3413 | /// requirements][derive-analysis]. |
| 3414 | /// |
| 3415 | #[cfg_attr ( |
| 3416 | feature = "derive" , |
| 3417 | doc = "[derive]: zerocopy_derive::FromBytes" , |
| 3418 | doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis" |
| 3419 | )] |
| 3420 | #[cfg_attr ( |
| 3421 | not(feature = "derive" ), |
| 3422 | doc = concat!("[derive]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.FromBytes.html" ), |
| 3423 | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/" , env!("CARGO_PKG_VERSION" ), "/zerocopy/derive.FromBytes.html#analysis" ), |
| 3424 | )] |
| 3425 | #[cfg_attr ( |
| 3426 | zerocopy_diagnostic_on_unimplemented_1_78_0, |
| 3427 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`" ) |
| 3428 | )] |
| 3429 | pub unsafe trait FromBytes: FromZeros { |
| 3430 | // The `Self: Sized` bound makes it so that `FromBytes` is still object |
| 3431 | // safe. |
| 3432 | #[doc (hidden)] |
| 3433 | fn only_derive_is_allowed_to_implement_this_trait() |
| 3434 | where |
| 3435 | Self: Sized; |
| 3436 | |
| 3437 | /// Interprets the given `source` as a `&Self`. |
| 3438 | /// |
| 3439 | /// This method attempts to return a reference to `source` interpreted as a |
| 3440 | /// `Self`. If the length of `source` is not a [valid size of |
| 3441 | /// `Self`][valid-size], or if `source` is not appropriately aligned, this |
| 3442 | /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can |
| 3443 | /// [infallibly discard the alignment error][size-error-from]. |
| 3444 | /// |
| 3445 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3446 | /// |
| 3447 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3448 | /// [self-unaligned]: Unaligned |
| 3449 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3450 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3451 | /// |
| 3452 | /// # Compile-Time Assertions |
| 3453 | /// |
| 3454 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3455 | /// component is zero-sized. Attempting to use this method on such types |
| 3456 | /// results in a compile-time assertion error; e.g.: |
| 3457 | /// |
| 3458 | /// ```compile_fail,E0080 |
| 3459 | /// use zerocopy::*; |
| 3460 | /// # use zerocopy_derive::*; |
| 3461 | /// |
| 3462 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 3463 | /// #[repr(C)] |
| 3464 | /// struct ZSTy { |
| 3465 | /// leading_sized: u16, |
| 3466 | /// trailing_dst: [()], |
| 3467 | /// } |
| 3468 | /// |
/// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
| 3470 | /// ``` |
| 3471 | /// |
| 3472 | /// # Examples |
| 3473 | /// |
| 3474 | /// ``` |
| 3475 | /// use zerocopy::FromBytes; |
| 3476 | /// # use zerocopy_derive::*; |
| 3477 | /// |
| 3478 | /// #[derive(FromBytes, KnownLayout, Immutable)] |
| 3479 | /// #[repr(C)] |
| 3480 | /// struct PacketHeader { |
| 3481 | /// src_port: [u8; 2], |
| 3482 | /// dst_port: [u8; 2], |
| 3483 | /// length: [u8; 2], |
| 3484 | /// checksum: [u8; 2], |
| 3485 | /// } |
| 3486 | /// |
| 3487 | /// #[derive(FromBytes, KnownLayout, Immutable)] |
| 3488 | /// #[repr(C)] |
| 3489 | /// struct Packet { |
| 3490 | /// header: PacketHeader, |
| 3491 | /// body: [u8], |
| 3492 | /// } |
| 3493 | /// |
| 3494 | /// // These bytes encode a `Packet`. |
| 3495 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..]; |
| 3496 | /// |
| 3497 | /// let packet = Packet::ref_from_bytes(bytes).unwrap(); |
| 3498 | /// |
| 3499 | /// assert_eq!(packet.header.src_port, [0, 1]); |
| 3500 | /// assert_eq!(packet.header.dst_port, [2, 3]); |
| 3501 | /// assert_eq!(packet.header.length, [4, 5]); |
| 3502 | /// assert_eq!(packet.header.checksum, [6, 7]); |
| 3503 | /// assert_eq!(packet.body, [8, 9, 10, 11]); |
| 3504 | /// ``` |
| 3505 | #[must_use = "has no side effects" ] |
| 3506 | #[inline ] |
| 3507 | fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>> |
| 3508 | where |
| 3509 | Self: KnownLayout + Immutable, |
| 3510 | { |
| 3511 | static_assert_dst_is_not_zst!(Self); |
| 3512 | match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) { |
| 3513 | Ok(ptr) => Ok(ptr.bikeshed_recall_valid().as_ref()), |
| 3514 | Err(err) => Err(err.map_src(|src| src.as_ref())), |
| 3515 | } |
| 3516 | } |
| 3517 | |
| 3518 | /// Interprets the prefix of the given `source` as a `&Self` without |
| 3519 | /// copying. |
| 3520 | /// |
| 3521 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 3522 | /// that can fit in the leading bytes of `source`, then attempts to return |
| 3523 | /// both a reference to those bytes interpreted as a `Self`, and a reference |
| 3524 | /// to the remaining bytes. If there are insufficient bytes, or if `source` |
| 3525 | /// is not appropriately aligned, this returns `Err`. If [`Self: |
| 3526 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 3527 | /// error][size-error-from]. |
| 3528 | /// |
| 3529 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3530 | /// |
| 3531 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3532 | /// [self-unaligned]: Unaligned |
| 3533 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3534 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3535 | /// |
| 3536 | /// # Compile-Time Assertions |
| 3537 | /// |
| 3538 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3539 | /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does |
| 3540 | /// support such types. Attempting to use this method on such types results |
| 3541 | /// in a compile-time assertion error; e.g.: |
| 3542 | /// |
| 3543 | /// ```compile_fail,E0080 |
| 3544 | /// use zerocopy::*; |
| 3545 | /// # use zerocopy_derive::*; |
| 3546 | /// |
| 3547 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 3548 | /// #[repr(C)] |
| 3549 | /// struct ZSTy { |
| 3550 | /// leading_sized: u16, |
| 3551 | /// trailing_dst: [()], |
| 3552 | /// } |
| 3553 | /// |
/// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
| 3555 | /// ``` |
| 3556 | /// |
| 3557 | /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems |
| 3558 | /// |
| 3559 | /// # Examples |
| 3560 | /// |
| 3561 | /// ``` |
| 3562 | /// use zerocopy::FromBytes; |
| 3563 | /// # use zerocopy_derive::*; |
| 3564 | /// |
| 3565 | /// #[derive(FromBytes, KnownLayout, Immutable)] |
| 3566 | /// #[repr(C)] |
| 3567 | /// struct PacketHeader { |
| 3568 | /// src_port: [u8; 2], |
| 3569 | /// dst_port: [u8; 2], |
| 3570 | /// length: [u8; 2], |
| 3571 | /// checksum: [u8; 2], |
| 3572 | /// } |
| 3573 | /// |
| 3574 | /// #[derive(FromBytes, KnownLayout, Immutable)] |
| 3575 | /// #[repr(C)] |
| 3576 | /// struct Packet { |
| 3577 | /// header: PacketHeader, |
| 3578 | /// body: [[u8; 2]], |
| 3579 | /// } |
| 3580 | /// |
| 3581 | /// // These are more bytes than are needed to encode a `Packet`. |
| 3582 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..]; |
| 3583 | /// |
| 3584 | /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap(); |
| 3585 | /// |
| 3586 | /// assert_eq!(packet.header.src_port, [0, 1]); |
| 3587 | /// assert_eq!(packet.header.dst_port, [2, 3]); |
| 3588 | /// assert_eq!(packet.header.length, [4, 5]); |
| 3589 | /// assert_eq!(packet.header.checksum, [6, 7]); |
| 3590 | /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]); |
| 3591 | /// assert_eq!(suffix, &[14u8][..]); |
| 3592 | /// ``` |
| 3593 | #[must_use = "has no side effects" ] |
| 3594 | #[inline ] |
| 3595 | fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>> |
| 3596 | where |
| 3597 | Self: KnownLayout + Immutable, |
| 3598 | { |
| 3599 | static_assert_dst_is_not_zst!(Self); |
| 3600 | ref_from_prefix_suffix(source, None, CastType::Prefix) |
| 3601 | } |
| 3602 | |
/// Interprets the suffix of the given `source` as a `&Self` without copying.
| 3604 | /// |
| 3605 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 3606 | /// that can fit in the trailing bytes of `source`, then attempts to return |
| 3607 | /// both a reference to those bytes interpreted as a `Self`, and a reference |
| 3608 | /// to the preceding bytes. If there are insufficient bytes, or if that |
| 3609 | /// suffix of `source` is not appropriately aligned, this returns `Err`. If |
| 3610 | /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 3611 | /// alignment error][size-error-from]. |
| 3612 | /// |
| 3613 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3614 | /// |
| 3615 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3616 | /// [self-unaligned]: Unaligned |
| 3617 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3618 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3619 | /// |
| 3620 | /// # Compile-Time Assertions |
| 3621 | /// |
| 3622 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3623 | /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does |
| 3624 | /// support such types. Attempting to use this method on such types results |
| 3625 | /// in a compile-time assertion error; e.g.: |
| 3626 | /// |
| 3627 | /// ```compile_fail,E0080 |
| 3628 | /// use zerocopy::*; |
| 3629 | /// # use zerocopy_derive::*; |
| 3630 | /// |
| 3631 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 3632 | /// #[repr(C)] |
| 3633 | /// struct ZSTy { |
| 3634 | /// leading_sized: u16, |
| 3635 | /// trailing_dst: [()], |
| 3636 | /// } |
| 3637 | /// |
/// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
| 3639 | /// ``` |
| 3640 | /// |
| 3641 | /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems |
| 3642 | /// |
| 3643 | /// # Examples |
| 3644 | /// |
| 3645 | /// ``` |
| 3646 | /// use zerocopy::FromBytes; |
| 3647 | /// # use zerocopy_derive::*; |
| 3648 | /// |
| 3649 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 3650 | /// #[repr(C)] |
| 3651 | /// struct PacketTrailer { |
| 3652 | /// frame_check_sequence: [u8; 4], |
| 3653 | /// } |
| 3654 | /// |
| 3655 | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
| 3656 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 3657 | /// |
| 3658 | /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap(); |
| 3659 | /// |
| 3660 | /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]); |
| 3661 | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
| 3662 | /// ``` |
| 3663 | #[must_use = "has no side effects" ] |
| 3664 | #[inline ] |
| 3665 | fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>> |
| 3666 | where |
| 3667 | Self: Immutable + KnownLayout, |
| 3668 | { |
| 3669 | static_assert_dst_is_not_zst!(Self); |
| 3670 | ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap) |
| 3671 | } |
| 3672 | |
| 3673 | /// Interprets the given `source` as a `&mut Self`. |
| 3674 | /// |
| 3675 | /// This method attempts to return a reference to `source` interpreted as a |
| 3676 | /// `Self`. If the length of `source` is not a [valid size of |
| 3677 | /// `Self`][valid-size], or if `source` is not appropriately aligned, this |
| 3678 | /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can |
| 3679 | /// [infallibly discard the alignment error][size-error-from]. |
| 3680 | /// |
| 3681 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3682 | /// |
| 3683 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3684 | /// [self-unaligned]: Unaligned |
| 3685 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3686 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3687 | /// |
| 3688 | /// # Compile-Time Assertions |
| 3689 | /// |
| 3690 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3691 | /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does |
| 3692 | /// support such types. Attempting to use this method on such types results |
| 3693 | /// in a compile-time assertion error; e.g.: |
| 3694 | /// |
| 3695 | /// ```compile_fail,E0080 |
| 3696 | /// use zerocopy::*; |
| 3697 | /// # use zerocopy_derive::*; |
| 3698 | /// |
| 3699 | /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] |
| 3700 | /// #[repr(C, packed)] |
| 3701 | /// struct ZSTy { |
| 3702 | /// leading_sized: [u8; 2], |
| 3703 | /// trailing_dst: [()], |
| 3704 | /// } |
| 3705 | /// |
| 3706 | /// let mut source = [85, 85]; |
/// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
| 3708 | /// ``` |
| 3709 | /// |
| 3710 | /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems |
| 3711 | /// |
| 3712 | /// # Examples |
| 3713 | /// |
| 3714 | /// ``` |
| 3715 | /// use zerocopy::FromBytes; |
| 3716 | /// # use zerocopy_derive::*; |
| 3717 | /// |
| 3718 | /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] |
| 3719 | /// #[repr(C)] |
| 3720 | /// struct PacketHeader { |
| 3721 | /// src_port: [u8; 2], |
| 3722 | /// dst_port: [u8; 2], |
| 3723 | /// length: [u8; 2], |
| 3724 | /// checksum: [u8; 2], |
| 3725 | /// } |
| 3726 | /// |
| 3727 | /// // These bytes encode a `PacketHeader`. |
| 3728 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; |
| 3729 | /// |
| 3730 | /// let header = PacketHeader::mut_from_bytes(bytes).unwrap(); |
| 3731 | /// |
| 3732 | /// assert_eq!(header.src_port, [0, 1]); |
| 3733 | /// assert_eq!(header.dst_port, [2, 3]); |
| 3734 | /// assert_eq!(header.length, [4, 5]); |
| 3735 | /// assert_eq!(header.checksum, [6, 7]); |
| 3736 | /// |
| 3737 | /// header.checksum = [0, 0]; |
| 3738 | /// |
| 3739 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]); |
| 3740 | /// ``` |
| 3741 | #[must_use = "has no side effects" ] |
| 3742 | #[inline ] |
| 3743 | fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>> |
| 3744 | where |
| 3745 | Self: IntoBytes + KnownLayout, |
| 3746 | { |
| 3747 | static_assert_dst_is_not_zst!(Self); |
| 3748 | match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) { |
| 3749 | Ok(ptr) => Ok(ptr.bikeshed_recall_valid().as_mut()), |
| 3750 | Err(err) => Err(err.map_src(|src| src.as_mut())), |
| 3751 | } |
| 3752 | } |
| 3753 | |
| 3754 | /// Interprets the prefix of the given `source` as a `&mut Self` without |
| 3755 | /// copying. |
| 3756 | /// |
| 3757 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 3758 | /// that can fit in the leading bytes of `source`, then attempts to return |
| 3759 | /// both a reference to those bytes interpreted as a `Self`, and a reference |
| 3760 | /// to the remaining bytes. If there are insufficient bytes, or if `source` |
| 3761 | /// is not appropriately aligned, this returns `Err`. If [`Self: |
| 3762 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 3763 | /// error][size-error-from]. |
| 3764 | /// |
| 3765 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3766 | /// |
| 3767 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3768 | /// [self-unaligned]: Unaligned |
| 3769 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3770 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3771 | /// |
| 3772 | /// # Compile-Time Assertions |
| 3773 | /// |
| 3774 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3775 | /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does |
| 3776 | /// support such types. Attempting to use this method on such types results |
| 3777 | /// in a compile-time assertion error; e.g.: |
| 3778 | /// |
| 3779 | /// ```compile_fail,E0080 |
| 3780 | /// use zerocopy::*; |
| 3781 | /// # use zerocopy_derive::*; |
| 3782 | /// |
| 3783 | /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] |
| 3784 | /// #[repr(C, packed)] |
| 3785 | /// struct ZSTy { |
| 3786 | /// leading_sized: [u8; 2], |
| 3787 | /// trailing_dst: [()], |
| 3788 | /// } |
| 3789 | /// |
| 3790 | /// let mut source = [85, 85]; |
| 3791 | /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
| 3792 | /// ``` |
| 3793 | /// |
| 3794 | /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
| 3795 | /// |
| 3796 | /// # Examples |
| 3797 | /// |
| 3798 | /// ``` |
| 3799 | /// use zerocopy::FromBytes; |
| 3800 | /// # use zerocopy_derive::*; |
| 3801 | /// |
| 3802 | /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] |
| 3803 | /// #[repr(C)] |
| 3804 | /// struct PacketHeader { |
| 3805 | /// src_port: [u8; 2], |
| 3806 | /// dst_port: [u8; 2], |
| 3807 | /// length: [u8; 2], |
| 3808 | /// checksum: [u8; 2], |
| 3809 | /// } |
| 3810 | /// |
| 3811 | /// // These are more bytes than are needed to encode a `PacketHeader`. |
| 3812 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 3813 | /// |
| 3814 | /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap(); |
| 3815 | /// |
| 3816 | /// assert_eq!(header.src_port, [0, 1]); |
| 3817 | /// assert_eq!(header.dst_port, [2, 3]); |
| 3818 | /// assert_eq!(header.length, [4, 5]); |
| 3819 | /// assert_eq!(header.checksum, [6, 7]); |
| 3820 | /// assert_eq!(body, &[8, 9][..]); |
| 3821 | /// |
| 3822 | /// header.checksum = [0, 0]; |
| 3823 | /// body.fill(1); |
| 3824 | /// |
| 3825 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]); |
| 3826 | /// ``` |
| 3827 | #[must_use = "has no side effects"]
| 3828 | #[inline]
| 3829 | fn mut_from_prefix( |
| 3830 | source: &mut [u8], |
| 3831 | ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>> |
| 3832 | where |
| 3833 | Self: IntoBytes + KnownLayout, |
| 3834 | { |
| 3835 | static_assert_dst_is_not_zst!(Self); |
| 3836 | mut_from_prefix_suffix(source, None, CastType::Prefix) |
| 3837 | } |
| 3838 | |
| 3839 | /// Interprets the suffix of the given `source` as a `&mut Self` without |
| 3840 | /// copying. |
| 3841 | /// |
| 3842 | /// This method computes the [largest possible size of `Self`][valid-size] |
| 3843 | /// that can fit in the trailing bytes of `source`, then attempts to return |
| 3844 | /// both a reference to those bytes interpreted as a `Self`, and a reference |
| 3845 | /// to the preceding bytes. If there are insufficient bytes, or if that |
| 3846 | /// suffix of `source` is not appropriately aligned, this returns `Err`. If |
| 3847 | /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 3848 | /// alignment error][size-error-from]. |
| 3849 | /// |
| 3850 | /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 3851 | /// |
| 3852 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 3853 | /// [self-unaligned]: Unaligned |
| 3854 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3855 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 3856 | /// |
| 3857 | /// # Compile-Time Assertions |
| 3858 | /// |
| 3859 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 3860 | /// component is zero-sized. Attempting to use this method on such types |
| 3861 | /// results in a compile-time assertion error; e.g.: |
| 3862 | /// |
| 3863 | /// ```compile_fail,E0080 |
| 3864 | /// use zerocopy::*; |
| 3865 | /// # use zerocopy_derive::*; |
| 3866 | /// |
| 3867 | /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)] |
| 3868 | /// #[repr(C, packed)] |
| 3869 | /// struct ZSTy { |
| 3870 | /// leading_sized: [u8; 2], |
| 3871 | /// trailing_dst: [()], |
| 3872 | /// } |
| 3873 | /// |
| 3874 | /// let mut source = [85, 85]; |
| 3875 | /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
| 3876 | /// ``` |
| 3877 | /// |
| 3878 | /// # Examples |
| 3879 | /// |
| 3880 | /// ``` |
| 3881 | /// use zerocopy::FromBytes; |
| 3882 | /// # use zerocopy_derive::*; |
| 3883 | /// |
| 3884 | /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)] |
| 3885 | /// #[repr(C)] |
| 3886 | /// struct PacketTrailer { |
| 3887 | /// frame_check_sequence: [u8; 4], |
| 3888 | /// } |
| 3889 | /// |
| 3890 | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
| 3891 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 3892 | /// |
| 3893 | /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap(); |
| 3894 | /// |
| 3895 | /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]); |
| 3896 | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
| 3897 | /// |
| 3898 | /// prefix.fill(0); |
| 3899 | /// trailer.frame_check_sequence.fill(1); |
| 3900 | /// |
| 3901 | /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]); |
| 3902 | /// ``` |
| 3903 | #[must_use = "has no side effects"]
| 3904 | #[inline]
| 3905 | fn mut_from_suffix( |
| 3906 | source: &mut [u8], |
| 3907 | ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>> |
| 3908 | where |
| 3909 | Self: IntoBytes + KnownLayout, |
| 3910 | { |
| 3911 | static_assert_dst_is_not_zst!(Self); |
| 3912 | mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap) |
| 3913 | } |
| 3914 | |
| 3915 | /// Interprets the given `source` as a `&Self` with a DST length equal to |
| 3916 | /// `count`. |
| 3917 | /// |
| 3918 | /// This method attempts to return a reference to `source` interpreted as a |
| 3919 | /// `Self` with `count` trailing elements. If the length of `source` is not |
| 3920 | /// equal to the size of `Self` with `count` elements, or if `source` is not |
| 3921 | /// appropriately aligned, this returns `Err`. If [`Self: |
| 3922 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 3923 | /// error][size-error-from]. |
| 3924 | /// |
| 3925 | /// [self-unaligned]: Unaligned |
| 3926 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 3927 | /// |
| 3928 | /// # Examples |
| 3929 | /// |
| 3930 | /// ``` |
| 3931 | /// use zerocopy::FromBytes; |
| 3932 | /// # use zerocopy_derive::*; |
| 3933 | /// |
| 3934 | /// # #[derive(Debug, PartialEq, Eq)] |
| 3935 | /// #[derive(FromBytes, Immutable)] |
| 3936 | /// #[repr(C)] |
| 3937 | /// struct Pixel { |
| 3938 | /// r: u8, |
| 3939 | /// g: u8, |
| 3940 | /// b: u8, |
| 3941 | /// a: u8, |
| 3942 | /// } |
| 3943 | /// |
| 3944 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..]; |
| 3945 | /// |
| 3946 | /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap(); |
| 3947 | /// |
| 3948 | /// assert_eq!(pixels, &[ |
| 3949 | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
| 3950 | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
| 3951 | /// ]); |
| 3952 | /// |
| 3953 | /// ``` |
| 3954 | /// |
| 3955 | /// Since an explicit `count` is provided, this method supports types with |
| 3956 | /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`] |
| 3957 | /// which do not take an explicit count do not support such types. |
| 3958 | /// |
| 3959 | /// ``` |
| 3960 | /// use zerocopy::*; |
| 3961 | /// # use zerocopy_derive::*; |
| 3962 | /// |
| 3963 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 3964 | /// #[repr(C)] |
| 3965 | /// struct ZSTy { |
| 3966 | /// leading_sized: [u8; 2], |
| 3967 | /// trailing_dst: [()], |
| 3968 | /// } |
| 3969 | /// |
| 3970 | /// let src = &[85, 85][..]; |
| 3971 | /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap(); |
| 3972 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 3973 | /// ``` |
| 3974 | /// |
| 3975 | /// [`ref_from_bytes`]: FromBytes::ref_from_bytes |
| 3976 | #[must_use = "has no side effects"]
| 3977 | #[inline]
| 3978 | fn ref_from_bytes_with_elems( |
| 3979 | source: &[u8], |
| 3980 | count: usize, |
| 3981 | ) -> Result<&Self, CastError<&[u8], Self>> |
| 3982 | where |
| 3983 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 3984 | { |
| 3985 | let source = Ptr::from_ref(source); |
| 3986 | let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count)); |
| 3987 | match maybe_slf { |
| 3988 | Ok(slf) => Ok(slf.bikeshed_recall_valid().as_ref()), |
| 3989 | Err(err) => Err(err.map_src(|s| s.as_ref())), |
| 3990 | } |
| 3991 | } |
| 3992 | |
| 3993 | /// Interprets the prefix of the given `source` as a DST `&Self` with length |
| 3994 | /// equal to `count`. |
| 3995 | /// |
| 3996 | /// This method attempts to return a reference to the prefix of `source` |
| 3997 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 3998 | /// to the remaining bytes. If there are insufficient bytes, or if `source` |
| 3999 | /// is not appropriately aligned, this returns `Err`. If [`Self: |
| 4000 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 4001 | /// error][size-error-from]. |
| 4002 | /// |
| 4003 | /// [self-unaligned]: Unaligned |
| 4004 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 4005 | /// |
| 4006 | /// # Examples |
| 4007 | /// |
| 4008 | /// ``` |
| 4009 | /// use zerocopy::FromBytes; |
| 4010 | /// # use zerocopy_derive::*; |
| 4011 | /// |
| 4012 | /// # #[derive(Debug, PartialEq, Eq)] |
| 4013 | /// #[derive(FromBytes, Immutable)] |
| 4014 | /// #[repr(C)] |
| 4015 | /// struct Pixel { |
| 4016 | /// r: u8, |
| 4017 | /// g: u8, |
| 4018 | /// b: u8, |
| 4019 | /// a: u8, |
| 4020 | /// } |
| 4021 | /// |
| 4022 | /// // These are more bytes than are needed to encode two `Pixel`s. |
| 4023 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4024 | /// |
| 4025 | /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap(); |
| 4026 | /// |
| 4027 | /// assert_eq!(pixels, &[ |
| 4028 | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
| 4029 | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
| 4030 | /// ]); |
| 4031 | /// |
| 4032 | /// assert_eq!(suffix, &[8, 9]); |
| 4033 | /// ``` |
| 4034 | /// |
| 4035 | /// Since an explicit `count` is provided, this method supports types with |
| 4036 | /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`] |
| 4037 | /// which do not take an explicit count do not support such types. |
| 4038 | /// |
| 4039 | /// ``` |
| 4040 | /// use zerocopy::*; |
| 4041 | /// # use zerocopy_derive::*; |
| 4042 | /// |
| 4043 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 4044 | /// #[repr(C)] |
| 4045 | /// struct ZSTy { |
| 4046 | /// leading_sized: [u8; 2], |
| 4047 | /// trailing_dst: [()], |
| 4048 | /// } |
| 4049 | /// |
| 4050 | /// let src = &[85, 85][..]; |
| 4051 | /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap(); |
| 4052 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 4053 | /// ``` |
| 4054 | /// |
| 4055 | /// [`ref_from_prefix`]: FromBytes::ref_from_prefix |
| 4056 | #[must_use = "has no side effects"]
| 4057 | #[inline]
| 4058 | fn ref_from_prefix_with_elems( |
| 4059 | source: &[u8], |
| 4060 | count: usize, |
| 4061 | ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>> |
| 4062 | where |
| 4063 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 4064 | { |
| 4065 | ref_from_prefix_suffix(source, Some(count), CastType::Prefix) |
| 4066 | } |
| 4067 | |
| 4068 | /// Interprets the suffix of the given `source` as a DST `&Self` with length |
| 4069 | /// equal to `count`. |
| 4070 | /// |
| 4071 | /// This method attempts to return a reference to the suffix of `source` |
| 4072 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 4073 | /// to the preceding bytes. If there are insufficient bytes, or if that |
| 4074 | /// suffix of `source` is not appropriately aligned, this returns `Err`. If |
| 4075 | /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 4076 | /// alignment error][size-error-from]. |
| 4077 | /// |
| 4078 | /// [self-unaligned]: Unaligned |
| 4079 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 4080 | /// |
| 4081 | /// # Examples |
| 4082 | /// |
| 4083 | /// ``` |
| 4084 | /// use zerocopy::FromBytes; |
| 4085 | /// # use zerocopy_derive::*; |
| 4086 | /// |
| 4087 | /// # #[derive(Debug, PartialEq, Eq)] |
| 4088 | /// #[derive(FromBytes, Immutable)] |
| 4089 | /// #[repr(C)] |
| 4090 | /// struct Pixel { |
| 4091 | /// r: u8, |
| 4092 | /// g: u8, |
| 4093 | /// b: u8, |
| 4094 | /// a: u8, |
| 4095 | /// } |
| 4096 | /// |
| 4097 | /// // These are more bytes than are needed to encode two `Pixel`s. |
| 4098 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4099 | /// |
| 4100 | /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap(); |
| 4101 | /// |
| 4102 | /// assert_eq!(prefix, &[0, 1]); |
| 4103 | /// |
| 4104 | /// assert_eq!(pixels, &[ |
| 4105 | /// Pixel { r: 2, g: 3, b: 4, a: 5 }, |
| 4106 | /// Pixel { r: 6, g: 7, b: 8, a: 9 }, |
| 4107 | /// ]); |
| 4108 | /// ``` |
| 4109 | /// |
| 4110 | /// Since an explicit `count` is provided, this method supports types with |
| 4111 | /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`] |
| 4112 | /// which do not take an explicit count do not support such types. |
| 4113 | /// |
| 4114 | /// ``` |
| 4115 | /// use zerocopy::*; |
| 4116 | /// # use zerocopy_derive::*; |
| 4117 | /// |
| 4118 | /// #[derive(FromBytes, Immutable, KnownLayout)] |
| 4119 | /// #[repr(C)] |
| 4120 | /// struct ZSTy { |
| 4121 | /// leading_sized: [u8; 2], |
| 4122 | /// trailing_dst: [()], |
| 4123 | /// } |
| 4124 | /// |
| 4125 | /// let src = &[85, 85][..]; |
| 4126 | /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap(); |
| 4127 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 4128 | /// ``` |
| 4129 | /// |
| 4130 | /// [`ref_from_suffix`]: FromBytes::ref_from_suffix |
| 4131 | #[must_use = "has no side effects"]
| 4132 | #[inline]
| 4133 | fn ref_from_suffix_with_elems( |
| 4134 | source: &[u8], |
| 4135 | count: usize, |
| 4136 | ) -> Result<(&[u8], &Self), CastError<&[u8], Self>> |
| 4137 | where |
| 4138 | Self: KnownLayout<PointerMetadata = usize> + Immutable, |
| 4139 | { |
| 4140 | ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap) |
| 4141 | } |
| 4142 | |
| 4143 | /// Interprets the given `source` as a `&mut Self` with a DST length equal |
| 4144 | /// to `count`. |
| 4145 | /// |
| 4146 | /// This method attempts to return a reference to `source` interpreted as a |
| 4147 | /// `Self` with `count` trailing elements. If the length of `source` is not |
| 4148 | /// equal to the size of `Self` with `count` elements, or if `source` is not |
| 4149 | /// appropriately aligned, this returns `Err`. If [`Self: |
| 4150 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 4151 | /// error][size-error-from]. |
| 4152 | /// |
| 4153 | /// [self-unaligned]: Unaligned |
| 4154 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 4155 | /// |
| 4156 | /// # Examples |
| 4157 | /// |
| 4158 | /// ``` |
| 4159 | /// use zerocopy::FromBytes; |
| 4160 | /// # use zerocopy_derive::*; |
| 4161 | /// |
| 4162 | /// # #[derive(Debug, PartialEq, Eq)] |
| 4163 | /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] |
| 4164 | /// #[repr(C)] |
| 4165 | /// struct Pixel { |
| 4166 | /// r: u8, |
| 4167 | /// g: u8, |
| 4168 | /// b: u8, |
| 4169 | /// a: u8, |
| 4170 | /// } |
| 4171 | /// |
| 4172 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; |
| 4173 | /// |
| 4174 | /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap(); |
| 4175 | /// |
| 4176 | /// assert_eq!(pixels, &[ |
| 4177 | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
| 4178 | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
| 4179 | /// ]); |
| 4180 | /// |
| 4181 | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
| 4182 | /// |
| 4183 | /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]); |
| 4184 | /// ``` |
| 4185 | /// |
| 4186 | /// Since an explicit `count` is provided, this method supports types with |
| 4187 | /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
| 4188 | /// which do not take an explicit count do not support such types.
| 4189 | /// |
| 4190 | /// ``` |
| 4191 | /// use zerocopy::*; |
| 4192 | /// # use zerocopy_derive::*; |
| 4193 | /// |
| 4194 | /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] |
| 4195 | /// #[repr(C, packed)] |
| 4196 | /// struct ZSTy { |
| 4197 | /// leading_sized: [u8; 2], |
| 4198 | /// trailing_dst: [()], |
| 4199 | /// } |
| 4200 | /// |
| 4201 | /// let src = &mut [85, 85][..]; |
| 4202 | /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap(); |
| 4203 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 4204 | /// ``` |
| 4205 | /// |
| 4206 | /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
| 4207 | #[must_use = "has no side effects"]
| 4208 | #[inline]
| 4209 | fn mut_from_bytes_with_elems( |
| 4210 | source: &mut [u8], |
| 4211 | count: usize, |
| 4212 | ) -> Result<&mut Self, CastError<&mut [u8], Self>> |
| 4213 | where |
| 4214 | Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable, |
| 4215 | { |
| 4216 | let source = Ptr::from_mut(source); |
| 4217 | let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count)); |
| 4218 | match maybe_slf { |
| 4219 | Ok(slf) => Ok(slf.bikeshed_recall_valid().as_mut()), |
| 4220 | Err(err) => Err(err.map_src(|s| s.as_mut())), |
| 4221 | } |
| 4222 | } |
| 4223 | |
| 4224 | /// Interprets the prefix of the given `source` as a `&mut Self` with DST |
| 4225 | /// length equal to `count`. |
| 4226 | /// |
| 4227 | /// This method attempts to return a reference to the prefix of `source` |
| 4228 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 4229 | /// to the remaining bytes. If there are insufficient bytes, or if `source`
| 4230 | /// is not appropriately aligned, this returns `Err`. If [`Self: |
| 4231 | /// Unaligned`][self-unaligned], you can [infallibly discard the alignment |
| 4232 | /// error][size-error-from]. |
| 4233 | /// |
| 4234 | /// [self-unaligned]: Unaligned |
| 4235 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 4236 | /// |
| 4237 | /// # Examples |
| 4238 | /// |
| 4239 | /// ``` |
| 4240 | /// use zerocopy::FromBytes; |
| 4241 | /// # use zerocopy_derive::*; |
| 4242 | /// |
| 4243 | /// # #[derive(Debug, PartialEq, Eq)] |
| 4244 | /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)] |
| 4245 | /// #[repr(C)] |
| 4246 | /// struct Pixel { |
| 4247 | /// r: u8, |
| 4248 | /// g: u8, |
| 4249 | /// b: u8, |
| 4250 | /// a: u8, |
| 4251 | /// } |
| 4252 | /// |
| 4253 | /// // These are more bytes than are needed to encode two `Pixel`s. |
| 4254 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4255 | /// |
| 4256 | /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap(); |
| 4257 | /// |
| 4258 | /// assert_eq!(pixels, &[ |
| 4259 | /// Pixel { r: 0, g: 1, b: 2, a: 3 }, |
| 4260 | /// Pixel { r: 4, g: 5, b: 6, a: 7 }, |
| 4261 | /// ]); |
| 4262 | /// |
| 4263 | /// assert_eq!(suffix, &[8, 9]); |
| 4264 | /// |
| 4265 | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
| 4266 | /// suffix.fill(1); |
| 4267 | /// |
| 4268 | /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]); |
| 4269 | /// ``` |
| 4270 | /// |
| 4271 | /// Since an explicit `count` is provided, this method supports types with |
| 4272 | /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`] |
| 4273 | /// which do not take an explicit count do not support such types. |
| 4274 | /// |
| 4275 | /// ``` |
| 4276 | /// use zerocopy::*; |
| 4277 | /// # use zerocopy_derive::*; |
| 4278 | /// |
| 4279 | /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] |
| 4280 | /// #[repr(C, packed)] |
| 4281 | /// struct ZSTy { |
| 4282 | /// leading_sized: [u8; 2], |
| 4283 | /// trailing_dst: [()], |
| 4284 | /// } |
| 4285 | /// |
| 4286 | /// let src = &mut [85, 85][..]; |
| 4287 | /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap(); |
| 4288 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 4289 | /// ``` |
| 4290 | /// |
| 4291 | /// [`mut_from_prefix`]: FromBytes::mut_from_prefix |
| 4292 | #[must_use = "has no side effects"]
| 4293 | #[inline]
| 4294 | fn mut_from_prefix_with_elems( |
| 4295 | source: &mut [u8], |
| 4296 | count: usize, |
| 4297 | ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>> |
| 4298 | where |
| 4299 | Self: IntoBytes + KnownLayout<PointerMetadata = usize>, |
| 4300 | { |
| 4301 | mut_from_prefix_suffix(source, Some(count), CastType::Prefix) |
| 4302 | } |
| 4303 | |
| 4304 | /// Interprets the suffix of the given `source` as a `&mut Self` with DST |
| 4305 | /// length equal to `count`. |
| 4306 | /// |
| 4307 | /// This method attempts to return a reference to the suffix of `source` |
| 4308 | /// interpreted as a `Self` with `count` trailing elements, and a reference |
| 4309 | /// to the preceding bytes. If there are insufficient bytes, or if that
| 4310 | /// suffix of `source` is not appropriately aligned, this returns `Err`. If |
| 4311 | /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the |
| 4312 | /// alignment error][size-error-from]. |
| 4313 | /// |
| 4314 | /// [self-unaligned]: Unaligned |
| 4315 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 4316 | /// |
| 4317 | /// # Examples |
| 4318 | /// |
| 4319 | /// ``` |
| 4320 | /// use zerocopy::FromBytes; |
| 4321 | /// # use zerocopy_derive::*; |
| 4322 | /// |
| 4323 | /// # #[derive(Debug, PartialEq, Eq)] |
| 4324 | /// #[derive(FromBytes, IntoBytes, Immutable)] |
| 4325 | /// #[repr(C)] |
| 4326 | /// struct Pixel { |
| 4327 | /// r: u8, |
| 4328 | /// g: u8, |
| 4329 | /// b: u8, |
| 4330 | /// a: u8, |
| 4331 | /// } |
| 4332 | /// |
| 4333 | /// // These are more bytes than are needed to encode two `Pixel`s. |
| 4334 | /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4335 | /// |
| 4336 | /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap(); |
| 4337 | /// |
| 4338 | /// assert_eq!(prefix, &[0, 1]); |
| 4339 | /// |
| 4340 | /// assert_eq!(pixels, &[ |
| 4341 | /// Pixel { r: 2, g: 3, b: 4, a: 5 }, |
| 4342 | /// Pixel { r: 6, g: 7, b: 8, a: 9 }, |
| 4343 | /// ]); |
| 4344 | /// |
| 4345 | /// prefix.fill(9); |
| 4346 | /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; |
| 4347 | /// |
| 4348 | /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]); |
| 4349 | /// ``` |
| 4350 | /// |
| 4351 | /// Since an explicit `count` is provided, this method supports types with |
| 4352 | /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`] |
| 4353 | /// which do not take an explicit count do not support such types. |
| 4354 | /// |
| 4355 | /// ``` |
| 4356 | /// use zerocopy::*; |
| 4357 | /// # use zerocopy_derive::*; |
| 4358 | /// |
| 4359 | /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] |
| 4360 | /// #[repr(C, packed)] |
| 4361 | /// struct ZSTy { |
| 4362 | /// leading_sized: [u8; 2], |
| 4363 | /// trailing_dst: [()], |
| 4364 | /// } |
| 4365 | /// |
| 4366 | /// let src = &mut [85, 85][..]; |
| 4367 | /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap(); |
| 4368 | /// assert_eq!(zsty.trailing_dst.len(), 42); |
| 4369 | /// ``` |
| 4370 | /// |
| 4371 | /// [`mut_from_suffix`]: FromBytes::mut_from_suffix |
| 4372 | #[must_use = "has no side effects"]
| 4373 | #[inline]
| 4374 | fn mut_from_suffix_with_elems( |
| 4375 | source: &mut [u8], |
| 4376 | count: usize, |
| 4377 | ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>> |
| 4378 | where |
| 4379 | Self: IntoBytes + KnownLayout<PointerMetadata = usize>, |
| 4380 | { |
| 4381 | mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap) |
| 4382 | } |
| 4383 | |
| 4384 | /// Reads a copy of `Self` from the given `source`. |
| 4385 | /// |
| 4386 | /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`. |
| 4387 | /// |
| 4388 | /// # Examples |
| 4389 | /// |
| 4390 | /// ``` |
| 4391 | /// use zerocopy::FromBytes; |
| 4392 | /// # use zerocopy_derive::*; |
| 4393 | /// |
| 4394 | /// #[derive(FromBytes)] |
| 4395 | /// #[repr(C)] |
| 4396 | /// struct PacketHeader { |
| 4397 | /// src_port: [u8; 2], |
| 4398 | /// dst_port: [u8; 2], |
| 4399 | /// length: [u8; 2], |
| 4400 | /// checksum: [u8; 2], |
| 4401 | /// } |
| 4402 | /// |
| 4403 | /// // These bytes encode a `PacketHeader`. |
| 4404 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..]; |
| 4405 | /// |
| 4406 | /// let header = PacketHeader::read_from_bytes(bytes).unwrap(); |
| 4407 | /// |
| 4408 | /// assert_eq!(header.src_port, [0, 1]); |
| 4409 | /// assert_eq!(header.dst_port, [2, 3]); |
| 4410 | /// assert_eq!(header.length, [4, 5]); |
| 4411 | /// assert_eq!(header.checksum, [6, 7]); |
| 4412 | /// ``` |
| 4413 | #[must_use = "has no side effects"]
| 4414 | #[inline]
| 4415 | fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>> |
| 4416 | where |
| 4417 | Self: Sized, |
| 4418 | { |
| 4419 | match Ref::<_, Unalign<Self>>::sized_from(source) { |
| 4420 | Ok(r) => Ok(Ref::read(&r).into_inner()), |
| 4421 | Err(CastError::Size(e)) => Err(e.with_dst()), |
| 4422 | Err(CastError::Alignment(_)) => { |
| 4423 | // SAFETY: `Unalign<Self>` is trivially aligned, so |
| 4424 | // `Ref::sized_from` cannot fail due to unmet alignment |
| 4425 | // requirements. |
| 4426 | unsafe { core::hint::unreachable_unchecked() } |
| 4427 | } |
| 4428 | Err(CastError::Validity(i)) => match i {}, |
| 4429 | } |
| 4430 | } |
| 4431 | |
| 4432 | /// Reads a copy of `Self` from the prefix of the given `source`. |
| 4433 | /// |
| 4434 | /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes |
| 4435 | /// of `source`, returning that `Self` and any remaining bytes. If |
| 4436 | /// `source.len() < size_of::<Self>()`, it returns `Err`. |
| 4437 | /// |
| 4438 | /// # Examples |
| 4439 | /// |
| 4440 | /// ``` |
| 4441 | /// use zerocopy::FromBytes; |
| 4442 | /// # use zerocopy_derive::*; |
| 4443 | /// |
| 4444 | /// #[derive(FromBytes)] |
| 4445 | /// #[repr(C)] |
| 4446 | /// struct PacketHeader { |
| 4447 | /// src_port: [u8; 2], |
| 4448 | /// dst_port: [u8; 2], |
| 4449 | /// length: [u8; 2], |
| 4450 | /// checksum: [u8; 2], |
| 4451 | /// } |
| 4452 | /// |
| 4453 | /// // These are more bytes than are needed to encode a `PacketHeader`. |
| 4454 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4455 | /// |
| 4456 | /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap(); |
| 4457 | /// |
| 4458 | /// assert_eq!(header.src_port, [0, 1]); |
| 4459 | /// assert_eq!(header.dst_port, [2, 3]); |
| 4460 | /// assert_eq!(header.length, [4, 5]); |
| 4461 | /// assert_eq!(header.checksum, [6, 7]); |
| 4462 | /// assert_eq!(body, [8, 9]); |
| 4463 | /// ``` |
| 4464 | #[must_use = "has no side effects"]
| 4465 | #[inline]
| 4466 | fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>> |
| 4467 | where |
| 4468 | Self: Sized, |
| 4469 | { |
| 4470 | match Ref::<_, Unalign<Self>>::sized_from_prefix(source) { |
| 4471 | Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)), |
| 4472 | Err(CastError::Size(e)) => Err(e.with_dst()), |
| 4473 | Err(CastError::Alignment(_)) => { |
| 4474 | // SAFETY: `Unalign<Self>` is trivially aligned, so |
| 4475 | // `Ref::sized_from_prefix` cannot fail due to unmet alignment |
| 4476 | // requirements. |
| 4477 | unsafe { core::hint::unreachable_unchecked() } |
| 4478 | } |
| 4479 | Err(CastError::Validity(i)) => match i {}, |
| 4480 | } |
| 4481 | } |
| 4482 | |
| 4483 | /// Reads a copy of `Self` from the suffix of the given `source`. |
| 4484 | /// |
| 4485 | /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes |
| 4486 | /// of `source`, returning that `Self` and any preceding bytes. If |
| 4487 | /// `source.len() < size_of::<Self>()`, it returns `Err`. |
| 4488 | /// |
| 4489 | /// # Examples |
| 4490 | /// |
| 4491 | /// ``` |
| 4492 | /// use zerocopy::FromBytes; |
| 4493 | /// # use zerocopy_derive::*; |
| 4494 | /// |
| 4495 | /// #[derive(FromBytes)] |
| 4496 | /// #[repr(C)] |
| 4497 | /// struct PacketTrailer { |
| 4498 | /// frame_check_sequence: [u8; 4], |
| 4499 | /// } |
| 4500 | /// |
| 4501 | /// // These are more bytes than are needed to encode a `PacketTrailer`. |
| 4502 | /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; |
| 4503 | /// |
| 4504 | /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap(); |
| 4505 | /// |
| 4506 | /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]); |
| 4507 | /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); |
| 4508 | /// ``` |
| 4509 | #[must_use = "has no side effects"]
| 4510 | #[inline]
| 4511 | fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>> |
| 4512 | where |
| 4513 | Self: Sized, |
| 4514 | { |
| 4515 | match Ref::<_, Unalign<Self>>::sized_from_suffix(source) { |
| 4516 | Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())), |
| 4517 | Err(CastError::Size(e)) => Err(e.with_dst()), |
| 4518 | Err(CastError::Alignment(_)) => { |
| 4519 | // SAFETY: `Unalign<Self>` is trivially aligned, so |
| 4520 | // `Ref::sized_from_suffix` cannot fail due to unmet alignment |
| 4521 | // requirements. |
| 4522 | unsafe { core::hint::unreachable_unchecked() } |
| 4523 | } |
| 4524 | Err(CastError::Validity(i)) => match i {}, |
| 4525 | } |
| 4526 | } |
| 4527 | |
| 4528 | /// Reads a copy of `Self` from an `io::Read`.
| 4529 | ///
| 4530 | /// This is useful for interfacing with operating system byte sources (files,
| 4531 | /// sockets, etc.).
| 4532 | /// |
| 4533 | /// # Examples |
| 4534 | /// |
| 4535 | /// ```no_run |
| 4536 | /// use zerocopy::{byteorder::big_endian::*, FromBytes}; |
| 4537 | /// use std::fs::File; |
| 4538 | /// # use zerocopy_derive::*; |
| 4539 | /// |
| 4540 | /// #[derive(FromBytes)] |
| 4541 | /// #[repr(C)] |
| 4542 | /// struct BitmapFileHeader { |
| 4543 | /// signature: [u8; 2], |
| 4544 | /// size: U32, |
| 4545 | /// reserved: U64, |
| 4546 | /// offset: U64, |
| 4547 | /// } |
| 4548 | /// |
| 4549 | /// let mut file = File::open("image.bin").unwrap(); |
| 4550 | /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap(); |
| 4551 | /// ``` |
| 4552 | #[cfg(feature = "std")]
| 4553 | #[inline(always)]
| 4554 | fn read_from_io<R>(mut src: R) -> io::Result<Self> |
| 4555 | where |
| 4556 | Self: Sized, |
| 4557 | R: io::Read, |
| 4558 | { |
| 4559 | let mut buf = CoreMaybeUninit::<Self>::zeroed(); |
| 4560 | let ptr = Ptr::from_mut(&mut buf); |
| 4561 | // SAFETY: `buf` consists entirely of initialized, zeroed bytes. |
| 4562 | let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() }; |
| 4563 | let ptr = ptr.as_bytes::<BecauseExclusive>(); |
| 4564 | src.read_exact(ptr.as_mut())?; |
| 4565 | // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is |
| 4566 | // `FromBytes`. |
| 4567 | Ok(unsafe { buf.assume_init() }) |
| 4568 | } |
| 4569 | |
| 4570 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
| 4571 | #[doc(hidden)]
| 4572 | #[must_use = "has no side effects"]
| 4573 | #[inline(always)]
| 4574 | fn ref_from(source: &[u8]) -> Option<&Self> |
| 4575 | where |
| 4576 | Self: KnownLayout + Immutable, |
| 4577 | { |
| 4578 | Self::ref_from_bytes(source).ok() |
| 4579 | } |
| 4580 | |
| 4581 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
| 4582 | #[doc(hidden)]
| 4583 | #[must_use = "has no side effects"]
| 4584 | #[inline(always)]
| 4585 | fn mut_from(source: &mut [u8]) -> Option<&mut Self> |
| 4586 | where |
| 4587 | Self: KnownLayout + IntoBytes, |
| 4588 | { |
| 4589 | Self::mut_from_bytes(source).ok() |
| 4590 | } |
| 4591 | |
| 4592 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
| 4593 | #[doc(hidden)]
| 4594 | #[must_use = "has no side effects"]
| 4595 | #[inline(always)]
| 4596 | fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])> |
| 4597 | where |
| 4598 | Self: Sized + Immutable, |
| 4599 | { |
| 4600 | <[Self]>::ref_from_prefix_with_elems(source, count).ok() |
| 4601 | } |
| 4602 | |
| 4603 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
| 4604 | #[doc(hidden)]
| 4605 | #[must_use = "has no side effects"]
| 4606 | #[inline(always)]
| 4607 | fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])> |
| 4608 | where |
| 4609 | Self: Sized + Immutable, |
| 4610 | { |
| 4611 | <[Self]>::ref_from_suffix_with_elems(source, count).ok() |
| 4612 | } |
| 4613 | |
| 4614 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
| 4615 | #[doc(hidden)]
| 4616 | #[must_use = "has no side effects"]
| 4617 | #[inline(always)]
| 4618 | fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])> |
| 4619 | where |
| 4620 | Self: Sized + IntoBytes, |
| 4621 | { |
| 4622 | <[Self]>::mut_from_prefix_with_elems(source, count).ok() |
| 4623 | } |
| 4624 | |
| 4625 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
| 4626 | #[doc(hidden)]
| 4627 | #[must_use = "has no side effects"]
| 4628 | #[inline(always)]
| 4629 | fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])> |
| 4630 | where |
| 4631 | Self: Sized + IntoBytes, |
| 4632 | { |
| 4633 | <[Self]>::mut_from_suffix_with_elems(source, count).ok() |
| 4634 | } |
| 4635 | |
| 4636 | #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
| 4637 | #[doc(hidden)]
| 4638 | #[must_use = "has no side effects"]
| 4639 | #[inline(always)]
| 4640 | fn read_from(source: &[u8]) -> Option<Self> |
| 4641 | where |
| 4642 | Self: Sized, |
| 4643 | { |
| 4644 | Self::read_from_bytes(source).ok() |
| 4645 | } |
| 4646 | } |
| 4647 | |
| 4648 | /// Interprets the given affix of the given bytes as a `&T`.
| 4649 | ///
| 4650 | /// This function computes the largest possible size of `T` that can fit in the
| 4651 | /// prefix or suffix bytes of `source`, then attempts to return both a reference
| 4652 | /// to those bytes interpreted as a `T`, and a reference to the excess bytes.
| 4653 | /// If there are insufficient bytes, or if that affix of `source` is not
| 4654 | /// appropriately aligned, this returns `Err`.
| 4655 | #[inline(always)]
| 4656 | fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>( |
| 4657 | source: &[u8], |
| 4658 | meta: Option<T::PointerMetadata>, |
| 4659 | cast_type: CastType, |
| 4660 | ) -> Result<(&T, &[u8]), CastError<&[u8], T>> { |
| 4661 | let (slf, prefix_suffix) = Ptr::from_ref(source)
| 4662 | .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
| 4663 | .map_err(|err| err.map_src(|s| s.as_ref()))?;
| 4664 | Ok((slf.bikeshed_recall_valid().as_ref(), prefix_suffix.as_ref())) |
| 4665 | } |
| 4666 | |
| 4667 | /// Interprets the given affix of the given bytes as a `&mut T` without
| 4668 | /// copying.
| 4669 | ///
| 4670 | /// This function computes the largest possible size of `T` that can fit in the
| 4671 | /// prefix or suffix bytes of `source`, then attempts to return both a reference
| 4672 | /// to those bytes interpreted as a `T`, and a reference to the excess bytes.
| 4673 | /// If there are insufficient bytes, or if that affix of `source` is not
| 4674 | /// appropriately aligned, this returns `Err`.
| 4675 | #[inline(always)]
| 4676 | fn mut_from_prefix_suffix<T: FromBytes + KnownLayout + ?Sized>( |
| 4677 | source: &mut [u8], |
| 4678 | meta: Option<T::PointerMetadata>, |
| 4679 | cast_type: CastType, |
| 4680 | ) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> { |
| 4681 | let (slf, prefix_suffix) = Ptr::from_mut(source)
| 4682 | .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
| 4683 | .map_err(|err| err.map_src(|s| s.as_mut()))?;
| 4684 | Ok((slf.bikeshed_recall_valid().as_mut(), prefix_suffix.as_mut())) |
| 4685 | } |
| 4686 | |
| 4687 | /// Analyzes whether a type is [`IntoBytes`]. |
| 4688 | /// |
| 4689 | /// This derive analyzes, at compile time, whether the annotated type satisfies |
| 4690 | /// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is |
| 4691 | /// sound to do so. This derive can be applied to structs and enums (see below |
| 4692 | /// for union support); e.g.: |
| 4693 | /// |
| 4694 | /// ``` |
| 4695 | /// # use zerocopy_derive::{IntoBytes}; |
| 4696 | /// #[derive(IntoBytes)] |
| 4697 | /// #[repr(C)] |
| 4698 | /// struct MyStruct { |
| 4699 | /// # /* |
| 4700 | /// ... |
| 4701 | /// # */ |
| 4702 | /// } |
| 4703 | /// |
| 4704 | /// #[derive(IntoBytes)] |
| 4705 | /// #[repr(u8)] |
| 4706 | /// enum MyEnum { |
| 4707 | /// # Variant, |
| 4708 | /// # /* |
| 4709 | /// ... |
| 4710 | /// # */ |
| 4711 | /// } |
| 4712 | /// ``` |
| 4713 | /// |
| 4714 | /// [safety conditions]: trait@IntoBytes#safety |
| 4715 | /// |
| 4716 | /// # Error Messages |
| 4717 | /// |
| 4718 | /// On Rust toolchains prior to 1.78.0, due to the way that the custom derive |
| 4719 | /// for `IntoBytes` is implemented, you may get an error like this: |
| 4720 | /// |
| 4721 | /// ```text |
| 4722 | /// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied |
| 4723 | /// --> lib.rs:23:10 |
| 4724 | /// | |
| 4725 | /// 1 | #[derive(IntoBytes)] |
| 4726 | /// | ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()` |
| 4727 | /// | |
| 4728 | /// = help: the following implementations were found: |
| 4729 | /// <() as PaddingFree<T, false>> |
| 4730 | /// ``` |
| 4731 | /// |
| 4732 | /// This error indicates that the type being annotated has padding bytes, which |
| 4733 | /// is illegal for `IntoBytes` types. Consider reducing the alignment of some |
| 4734 | /// fields by using types in the [`byteorder`] module, wrapping field types in |
| 4735 | /// [`Unalign`], adding explicit struct fields where those padding bytes would |
| 4736 | /// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type |
| 4737 | /// layout] for more information about type layout and padding. |
| 4738 | /// |
| 4739 | /// [type layout]: https://doc.rust-lang.org/reference/type-layout.html |
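|  | ///
|  | /// For example, a `repr(C)` struct with a `u8` field followed by a `u16` field
|  | /// has one byte of padding after the `u8`; making that byte an explicit field
|  | /// is one way to satisfy the derive. A minimal sketch (the type and field names
|  | /// are illustrative):
|  | ///
|  | /// ```
|  | /// # use zerocopy_derive::IntoBytes;
|  | /// #[derive(IntoBytes)]
|  | /// #[repr(C)]
|  | /// struct Record {
|  | ///     flag: u8,
|  | ///     pad: u8, // explicit padding byte; without it, the layout has padding
|  | ///     value: u16,
|  | /// }
|  | /// ```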
| 4740 | /// |
| 4741 | /// # Unions |
| 4742 | /// |
| 4743 | /// Currently, union bit validity is [up in the air][union-validity], and so |
| 4744 | /// zerocopy does not support `#[derive(IntoBytes)]` on unions by default. |
| 4745 | /// However, implementing `IntoBytes` on a union type is likely sound on all |
| 4746 | /// existing Rust toolchains - it's just that it may become unsound in the |
| 4747 | /// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by |
| 4748 | /// passing the unstable `zerocopy_derive_union_into_bytes` cfg: |
| 4749 | /// |
| 4750 | /// ```shell |
| 4751 | /// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build |
| 4752 | /// ``` |
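|  | ///
|  | /// With that cfg enabled, the derive accepts a union when (roughly) every field
|  | /// is `IntoBytes` and covers the union's entire size, so no byte is ever
|  | /// padding. A minimal, illustrative sketch (fenced as `ignore` since it
|  | /// requires the cfg):
|  | ///
|  | /// ```ignore
|  | /// # use zerocopy_derive::IntoBytes;
|  | /// #[derive(IntoBytes)]
|  | /// #[repr(C)]
|  | /// union WordOrBytes {
|  | ///     word: u32,
|  | ///     bytes: [u8; 4],
|  | /// }
|  | /// ```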
| 4753 | /// |
| 4754 | /// However, it is your responsibility to ensure that this derive is sound on |
| 4755 | /// the specific versions of the Rust toolchain you are using! We make no |
| 4756 | /// stability or soundness guarantees regarding this cfg, and may remove it at |
| 4757 | /// any point. |
| 4758 | /// |
| 4759 | /// We are actively working with Rust to stabilize the necessary language |
| 4760 | /// guarantees to support this in a forwards-compatible way, which will enable |
| 4761 | /// us to remove the cfg gate. As part of this effort, we need to know how much |
| 4762 | /// demand there is for this feature. If you would like to use `IntoBytes` on |
| 4763 | /// unions, [please let us know][discussion]. |
| 4764 | /// |
| 4765 | /// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438 |
| 4766 | /// [discussion]: https://github.com/google/zerocopy/discussions/1802 |
| 4767 | /// |
| 4768 | /// # Analysis |
| 4769 | /// |
| 4770 | /// *This section describes, roughly, the analysis performed by this derive to |
| 4771 | /// determine whether it is sound to implement `IntoBytes` for a given type. |
| 4772 | /// Unless you are modifying the implementation of this derive, or attempting to |
| 4773 | /// manually implement `IntoBytes` for a type yourself, you don't need to read |
| 4774 | /// this section.* |
| 4775 | /// |
| 4776 | /// If a type has the following properties, then this derive can implement |
| 4777 | /// `IntoBytes` for that type: |
| 4778 | /// |
| 4779 | /// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally: |
| 4780 | /// - if the type is `repr(transparent)` or `repr(packed)`, it is |
| 4781 | /// [`IntoBytes`] if its fields are [`IntoBytes`]; else, |
| 4782 | /// - if the type is `repr(C)` with at most one field, it is [`IntoBytes`] |
| 4783 | /// if its field is [`IntoBytes`]; else, |
| 4784 | /// - if the type has no generic parameters, it is [`IntoBytes`] if the type |
| 4785 | /// is sized and has no padding bytes; else, |
| 4786 | /// - if the type is `repr(C)`, its fields must be [`Unaligned`]. |
| 4787 | /// - If the type is an enum: |
| 4788 | /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, |
| 4789 | /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). |
| 4790 | /// - It must have no padding bytes. |
| 4791 | /// - Its fields must be [`IntoBytes`]. |
| 4792 | /// |
| 4793 | /// This analysis is subject to change. Unsafe code may *only* rely on the |
| 4794 | /// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
| 4795 | /// implementation details of this derive. |
| 4796 | /// |
| 4797 | /// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html |
| 4798 | #[cfg(any(feature = "derive", test))]
| 4799 | #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
| 4800 | pub use zerocopy_derive::IntoBytes; |
| 4801 | |
| 4802 | /// Types that can be converted to an immutable slice of initialized bytes. |
| 4803 | /// |
| 4804 | /// Any `IntoBytes` type can be converted to a slice of initialized bytes of the |
| 4805 | /// same size. This is useful for efficiently serializing structured data as raw |
| 4806 | /// bytes. |
| 4807 | /// |
| 4808 | /// # Implementation |
| 4809 | /// |
| 4810 | /// **Do not implement this trait yourself!** Instead, use |
| 4811 | /// [`#[derive(IntoBytes)]`][derive]; e.g.: |
| 4812 | /// |
| 4813 | /// ``` |
| 4814 | /// # use zerocopy_derive::IntoBytes; |
| 4815 | /// #[derive(IntoBytes)] |
| 4816 | /// #[repr(C)] |
| 4817 | /// struct MyStruct { |
| 4818 | /// # /* |
| 4819 | /// ... |
| 4820 | /// # */ |
| 4821 | /// } |
| 4822 | /// |
| 4823 | /// #[derive(IntoBytes)] |
| 4824 | /// #[repr(u8)] |
| 4825 | /// enum MyEnum { |
| 4826 | /// # Variant0, |
| 4827 | /// # /* |
| 4828 | /// ... |
| 4829 | /// # */ |
| 4830 | /// } |
| 4831 | /// ``` |
| 4832 | /// |
| 4833 | /// This derive performs a sophisticated, compile-time safety analysis to |
| 4834 | /// determine whether a type is `IntoBytes`. See the [derive |
| 4835 | /// documentation][derive] for guidance on how to interpret error messages |
| 4836 | /// produced by the derive's analysis. |
| 4837 | /// |
| 4838 | /// # Safety |
| 4839 | /// |
| 4840 | /// *This section describes what is required in order for `T: IntoBytes`, and |
| 4841 | /// what unsafe code may assume of such types. If you don't plan on implementing |
| 4842 | /// `IntoBytes` manually, and you don't plan on writing unsafe code that |
| 4843 | /// operates on `IntoBytes` types, then you don't need to read this section.* |
| 4844 | /// |
| 4845 | /// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any |
| 4846 | /// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is |
| 4847 | /// marked as `IntoBytes` which violates this contract, it may cause undefined |
| 4848 | /// behavior. |
| 4849 | /// |
| 4850 | /// `#[derive(IntoBytes)]` only permits [types which satisfy these |
| 4851 | /// requirements][derive-analysis]. |
| 4852 | /// |
| 4853 | #[cfg_attr(
| 4854 | feature = "derive",
| 4855 | doc = "[derive]: zerocopy_derive::IntoBytes",
| 4856 | doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
| 4857 | )]
| 4858 | #[cfg_attr(
| 4859 | not(feature = "derive"),
| 4860 | doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
| 4861 | doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
| 4862 | )]
| 4863 | #[cfg_attr(
| 4864 | zerocopy_diagnostic_on_unimplemented_1_78_0,
| 4865 | diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
| 4866 | )]
| 4867 | pub unsafe trait IntoBytes { |
| 4868 | // The `Self: Sized` bound makes it so that this function doesn't prevent |
| 4869 | // `IntoBytes` from being object safe. Note that other `IntoBytes` methods |
| 4870 | // prevent object safety, but those provide a benefit in exchange for object |
| 4871 | // safety. If at some point we remove those methods, change their type |
| 4872 | // signatures, or move them out of this trait so that `IntoBytes` is object |
| 4873 | // safe again, it's important that this function not prevent object safety. |
| 4874 | #[doc(hidden)]
| 4875 | fn only_derive_is_allowed_to_implement_this_trait() |
| 4876 | where |
| 4877 | Self: Sized; |
| 4878 | |
| 4879 | /// Gets the bytes of this value. |
| 4880 | /// |
| 4881 | /// # Examples |
| 4882 | /// |
| 4883 | /// ``` |
| 4884 | /// use zerocopy::IntoBytes; |
| 4885 | /// # use zerocopy_derive::*; |
| 4886 | /// |
| 4887 | /// #[derive(IntoBytes, Immutable)] |
| 4888 | /// #[repr(C)] |
| 4889 | /// struct PacketHeader { |
| 4890 | /// src_port: [u8; 2], |
| 4891 | /// dst_port: [u8; 2], |
| 4892 | /// length: [u8; 2], |
| 4893 | /// checksum: [u8; 2], |
| 4894 | /// } |
| 4895 | /// |
| 4896 | /// let header = PacketHeader { |
| 4897 | /// src_port: [0, 1], |
| 4898 | /// dst_port: [2, 3], |
| 4899 | /// length: [4, 5], |
| 4900 | /// checksum: [6, 7], |
| 4901 | /// }; |
| 4902 | /// |
| 4903 | /// let bytes = header.as_bytes(); |
| 4904 | /// |
| 4905 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
| 4906 | /// ``` |
| 4907 | #[must_use = "has no side effects"]
| 4908 | #[inline(always)]
| 4909 | fn as_bytes(&self) -> &[u8] |
| 4910 | where |
| 4911 | Self: Immutable, |
| 4912 | { |
| 4913 | // Note that this method does not have a `Self: Sized` bound; |
| 4914 | // `size_of_val` works for unsized values too. |
| 4915 | let len = mem::size_of_val(self); |
| 4916 | let slf: *const Self = self; |
| 4917 | |
| 4918 | // SAFETY: |
| 4919 | // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()` |
| 4920 | // many bytes because... |
| 4921 | // - `slf` is the same pointer as `self`, and `self` is a reference |
| 4922 | // which points to an object whose size is `len`. Thus... |
| 4923 | // - The entire region of `len` bytes starting at `slf` is contained |
| 4924 | // within a single allocation. |
| 4925 | // - `slf` is non-null. |
| 4926 | // - `slf` is trivially aligned to `align_of::<u8>() == 1`. |
| 4927 | // - `Self: IntoBytes` ensures that all of the bytes of `slf` are |
| 4928 | // initialized. |
| 4929 | // - Since `slf` is derived from `self`, and `self` is an immutable |
| 4930 | // reference, the only other references to this memory region that |
| 4931 | // could exist are other immutable references, and those don't allow |
| 4932 | // mutation. `Self: Immutable` prohibits types which contain |
| 4933 | // `UnsafeCell`s, which are the only types for which this rule |
| 4934 | // wouldn't be sufficient. |
| 4935 | // - The total size of the resulting slice is no larger than |
| 4936 | // `isize::MAX` because no allocation produced by safe code can be |
| 4937 | // larger than `isize::MAX`. |
| 4938 | // |
| 4939 | // TODO(#429): Add references to docs and quotes. |
| 4940 | unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) } |
| 4941 | } |
| 4942 | |
| 4943 | /// Gets the bytes of this value mutably. |
| 4944 | /// |
| 4945 | /// # Examples |
| 4946 | /// |
| 4947 | /// ``` |
| 4948 | /// use zerocopy::IntoBytes; |
| 4949 | /// # use zerocopy_derive::*; |
| 4950 | /// |
| 4951 | /// # #[derive(Eq, PartialEq, Debug)] |
| 4952 | /// #[derive(FromBytes, IntoBytes, Immutable)] |
| 4953 | /// #[repr(C)] |
| 4954 | /// struct PacketHeader { |
| 4955 | /// src_port: [u8; 2], |
| 4956 | /// dst_port: [u8; 2], |
| 4957 | /// length: [u8; 2], |
| 4958 | /// checksum: [u8; 2], |
| 4959 | /// } |
| 4960 | /// |
| 4961 | /// let mut header = PacketHeader { |
| 4962 | /// src_port: [0, 1], |
| 4963 | /// dst_port: [2, 3], |
| 4964 | /// length: [4, 5], |
| 4965 | /// checksum: [6, 7], |
| 4966 | /// }; |
| 4967 | /// |
| 4968 | /// let bytes = header.as_mut_bytes(); |
| 4969 | /// |
| 4970 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
| 4971 | /// |
| 4972 | /// bytes.reverse(); |
| 4973 | /// |
| 4974 | /// assert_eq!(header, PacketHeader { |
| 4975 | /// src_port: [7, 6], |
| 4976 | /// dst_port: [5, 4], |
| 4977 | /// length: [3, 2], |
| 4978 | /// checksum: [1, 0], |
| 4979 | /// }); |
| 4980 | /// ``` |
| 4981 | #[must_use = "has no side effects"]
| 4982 | #[inline(always)]
| 4983 | fn as_mut_bytes(&mut self) -> &mut [u8] |
| 4984 | where |
| 4985 | Self: FromBytes, |
| 4986 | { |
| 4987 | // Note that this method does not have a `Self: Sized` bound; |
| 4988 | // `size_of_val` works for unsized values too. |
| 4989 | let len = mem::size_of_val(self); |
| 4990 | let slf: *mut Self = self; |
| 4991 | |
| 4992 | // SAFETY: |
| 4993 | // - `slf.cast::<u8>()` is valid for reads and writes for `len * |
| 4994 | // size_of::<u8>()` many bytes because... |
| 4995 | // - `slf` is the same pointer as `self`, and `self` is a reference |
| 4996 | // which points to an object whose size is `len`. Thus... |
| 4997 | // - The entire region of `len` bytes starting at `slf` is contained |
| 4998 | // within a single allocation. |
| 4999 | // - `slf` is non-null. |
| 5000 | // - `slf` is trivially aligned to `align_of::<u8>() == 1`. |
| 5001 | // - `Self: IntoBytes` ensures that all of the bytes of `slf` are |
| 5002 | // initialized. |
| 5003 | // - `Self: FromBytes` ensures that no write to this memory region |
| 5004 | // could result in it containing an invalid `Self`. |
| 5005 | // - Since `slf` is derived from `self`, and `self` is a mutable |
| 5006 | // reference, no other references to this memory region can exist. |
| 5007 | // - The total size of the resulting slice is no larger than |
| 5008 | // `isize::MAX` because no allocation produced by safe code can be |
| 5009 | // larger than `isize::MAX`. |
| 5010 | // |
| 5011 | // TODO(#429): Add references to docs and quotes. |
| 5012 | unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) } |
| 5013 | } |
| 5014 | |
| 5015 | /// Writes a copy of `self` to `dst`. |
| 5016 | /// |
| 5017 | /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`. |
| 5018 | /// |
| 5019 | /// # Examples |
| 5020 | /// |
| 5021 | /// ``` |
| 5022 | /// use zerocopy::IntoBytes; |
| 5023 | /// # use zerocopy_derive::*; |
| 5024 | /// |
| 5025 | /// #[derive(IntoBytes, Immutable)] |
| 5026 | /// #[repr(C)] |
| 5027 | /// struct PacketHeader { |
| 5028 | /// src_port: [u8; 2], |
| 5029 | /// dst_port: [u8; 2], |
| 5030 | /// length: [u8; 2], |
| 5031 | /// checksum: [u8; 2], |
| 5032 | /// } |
| 5033 | /// |
| 5034 | /// let header = PacketHeader { |
| 5035 | /// src_port: [0, 1], |
| 5036 | /// dst_port: [2, 3], |
| 5037 | /// length: [4, 5], |
| 5038 | /// checksum: [6, 7], |
| 5039 | /// }; |
| 5040 | /// |
| 5041 | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0]; |
| 5042 | /// |
| 5043 | /// header.write_to(&mut bytes[..]); |
| 5044 | /// |
| 5045 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); |
| 5046 | /// ``` |
| 5047 | /// |
| 5048 | /// If too many or too few target bytes are provided, `write_to` returns |
| 5049 | /// `Err` and leaves the target bytes unmodified: |
| 5050 | /// |
| 5051 | /// ``` |
| 5052 | /// # use zerocopy::IntoBytes; |
| 5053 | /// # let header = u128::MAX; |
| 5054 | /// let mut excessive_bytes = &mut [0u8; 128][..]; |
| 5055 | /// |
| 5056 | /// let write_result = header.write_to(excessive_bytes); |
| 5057 | /// |
| 5058 | /// assert!(write_result.is_err()); |
| 5059 | /// assert_eq!(excessive_bytes, [0u8; 128]); |
| 5060 | /// ``` |
| 5061 | #[must_use = "callers should check the return value to see if the operation succeeded" ] |
| 5062 | #[inline ] |
| 5063 | fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> |
| 5064 | where |
| 5065 | Self: Immutable, |
| 5066 | { |
| 5067 | let src = self.as_bytes(); |
| 5068 | if dst.len() == src.len() { |
| 5069 | // SAFETY: Within this branch of the conditional, we have ensured |
| 5070 | // that `dst.len()` is equal to `src.len()`. Neither the size of the |
| 5071 | // source nor the size of the destination change between the above |
| 5072 | // size check and the invocation of `copy_unchecked`. |
| 5073 | unsafe { util::copy_unchecked(src, dst) } |
| 5074 | Ok(()) |
| 5075 | } else { |
| 5076 | Err(SizeError::new(self)) |
| 5077 | } |
| 5078 | } |
| 5079 | |
| 5080 | /// Writes a copy of `self` to the prefix of `dst`. |
| 5081 | /// |
| 5082 | /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes |
| 5083 | /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`. |
| 5084 | /// |
| 5085 | /// # Examples |
| 5086 | /// |
| 5087 | /// ``` |
| 5088 | /// use zerocopy::IntoBytes; |
| 5089 | /// # use zerocopy_derive::*; |
| 5090 | /// |
| 5091 | /// #[derive(IntoBytes, Immutable)] |
| 5092 | /// #[repr(C)] |
| 5093 | /// struct PacketHeader { |
| 5094 | /// src_port: [u8; 2], |
| 5095 | /// dst_port: [u8; 2], |
| 5096 | /// length: [u8; 2], |
| 5097 | /// checksum: [u8; 2], |
| 5098 | /// } |
| 5099 | /// |
| 5100 | /// let header = PacketHeader { |
| 5101 | /// src_port: [0, 1], |
| 5102 | /// dst_port: [2, 3], |
| 5103 | /// length: [4, 5], |
| 5104 | /// checksum: [6, 7], |
| 5105 | /// }; |
| 5106 | /// |
| 5107 | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; |
| 5108 | /// |
| 5109 | /// header.write_to_prefix(&mut bytes[..]); |
| 5110 | /// |
| 5111 | /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]); |
| 5112 | /// ``` |
| 5113 | /// |
| 5114 | /// If insufficient target bytes are provided, `write_to_prefix` returns |
| 5115 | /// `Err` and leaves the target bytes unmodified: |
| 5116 | /// |
| 5117 | /// ``` |
| 5118 | /// # use zerocopy::IntoBytes; |
| 5119 | /// # let header = u128::MAX; |
| 5120 | /// let mut insufficient_bytes = &mut [0, 0][..];
| 5121 | ///
| 5122 | /// let write_result = header.write_to_prefix(insufficient_bytes);
| 5123 | ///
| 5124 | /// assert!(write_result.is_err());
| 5125 | /// assert_eq!(insufficient_bytes, [0, 0]);
| 5126 | /// ``` |
| 5127 | #[must_use = "callers should check the return value to see if the operation succeeded" ] |
| 5128 | #[inline ] |
| 5129 | fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> |
| 5130 | where |
| 5131 | Self: Immutable, |
| 5132 | { |
| 5133 | let src = self.as_bytes(); |
| 5134 | match dst.get_mut(..src.len()) { |
| 5135 | Some(dst) => { |
| 5136 | // SAFETY: Within this branch of the `match`, we have ensured |
| 5137 | // through fallible subslicing that `dst.len()` is equal to |
| 5138 | // `src.len()`. Neither the size of the source nor the size of |
| 5139 | // the destination change between the above subslicing operation |
| 5140 | // and the invocation of `copy_unchecked`. |
| 5141 | unsafe { util::copy_unchecked(src, dst) } |
| 5142 | Ok(()) |
| 5143 | } |
| 5144 | None => Err(SizeError::new(self)), |
| 5145 | } |
| 5146 | } |
| 5147 | |
| 5148 | /// Writes a copy of `self` to the suffix of `dst`. |
| 5149 | /// |
| 5150 | /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of |
| 5151 | /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`. |
| 5152 | /// |
| 5153 | /// # Examples |
| 5154 | /// |
| 5155 | /// ``` |
| 5156 | /// use zerocopy::IntoBytes; |
| 5157 | /// # use zerocopy_derive::*; |
| 5158 | /// |
| 5159 | /// #[derive(IntoBytes, Immutable)] |
| 5160 | /// #[repr(C)] |
| 5161 | /// struct PacketHeader { |
| 5162 | /// src_port: [u8; 2], |
| 5163 | /// dst_port: [u8; 2], |
| 5164 | /// length: [u8; 2], |
| 5165 | /// checksum: [u8; 2], |
| 5166 | /// } |
| 5167 | /// |
| 5168 | /// let header = PacketHeader { |
| 5169 | /// src_port: [0, 1], |
| 5170 | /// dst_port: [2, 3], |
| 5171 | /// length: [4, 5], |
| 5172 | /// checksum: [6, 7], |
| 5173 | /// }; |
| 5174 | /// |
| 5175 | /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; |
| 5176 | /// |
| 5177 | /// header.write_to_suffix(&mut bytes[..]); |
| 5178 | /// |
| 5179 | /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); |
| 5180 | /// |
| 5181 | /// let mut insufficent_bytes = &mut [0, 0][..]; |
| 5182 | /// |
| 5183 | /// let write_result = header.write_to_suffix(insufficent_bytes); |
| 5184 | /// |
| 5185 | /// assert!(write_result.is_err()); |
| 5186 | /// assert_eq!(insufficent_bytes, [0, 0]); |
| 5187 | /// ``` |
| 5188 | /// |
| 5189 | /// If insufficient target bytes are provided, `write_to_suffix` returns |
| 5190 | /// `Err` and leaves the target bytes unmodified: |
| 5191 | /// |
| 5192 | /// ``` |
| 5193 | /// # use zerocopy::IntoBytes; |
| 5194 | /// # let header = u128::MAX; |
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_err());
    /// assert_eq!(insufficient_bytes, [0, 0]);
| 5201 | /// ``` |
    #[must_use = "callers should check the return value to see if the operation succeeded"]
    #[inline]
| 5204 | fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>> |
| 5205 | where |
| 5206 | Self: Immutable, |
| 5207 | { |
| 5208 | let src = self.as_bytes(); |
| 5209 | let start = if let Some(start) = dst.len().checked_sub(src.len()) { |
| 5210 | start |
| 5211 | } else { |
| 5212 | return Err(SizeError::new(self)); |
| 5213 | }; |
| 5214 | let dst = if let Some(dst) = dst.get_mut(start..) { |
| 5215 | dst |
| 5216 | } else { |
| 5217 | // get_mut() should never return None here. We return a `SizeError` |
| 5218 | // rather than .unwrap() because in the event the branch is not |
| 5219 | // optimized away, returning a value is generally lighter-weight |
| 5220 | // than panicking. |
| 5221 | return Err(SizeError::new(self)); |
| 5222 | }; |
| 5223 | // SAFETY: Through fallible subslicing of `dst`, we have ensured that |
| 5224 | // `dst.len()` is equal to `src.len()`. Neither the size of the source |
| 5225 | // nor the size of the destination change between the above subslicing |
| 5226 | // operation and the invocation of `copy_unchecked`. |
| 5227 | unsafe { |
| 5228 | util::copy_unchecked(src, dst); |
| 5229 | } |
| 5230 | Ok(()) |
| 5231 | } |
| 5232 | |
| 5233 | /// Writes a copy of `self` to an `io::Write`. |
| 5234 | /// |
| 5235 | /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful |
| 5236 | /// for interfacing with operating system byte sinks (files, sockets, etc.). |
| 5237 | /// |
| 5238 | /// # Examples |
| 5239 | /// |
| 5240 | /// ```no_run |
| 5241 | /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes}; |
| 5242 | /// use std::fs::File; |
| 5243 | /// # use zerocopy_derive::*; |
| 5244 | /// |
| 5245 | /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] |
| 5246 | /// #[repr(C, packed)] |
| 5247 | /// struct GrayscaleImage { |
| 5248 | /// height: U16, |
| 5249 | /// width: U16, |
| 5250 | /// pixels: [U16], |
| 5251 | /// } |
| 5252 | /// |
| 5253 | /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap(); |
| 5254 | /// let mut file = File::create("image.bin").unwrap(); |
| 5255 | /// image.write_to_io(&mut file).unwrap(); |
| 5256 | /// ``` |
| 5257 | /// |
    /// If the write fails, `write_to_io` returns `Err` and a partial write may
    /// have occurred; e.g.:
| 5260 | /// |
| 5261 | /// ``` |
| 5262 | /// # use zerocopy::IntoBytes; |
| 5263 | /// |
| 5264 | /// let src = u128::MAX; |
| 5265 | /// let mut dst = [0u8; 2]; |
| 5266 | /// |
| 5267 | /// let write_result = src.write_to_io(&mut dst[..]); |
| 5268 | /// |
| 5269 | /// assert!(write_result.is_err()); |
| 5270 | /// assert_eq!(dst, [255, 255]); |
| 5271 | /// ``` |
    #[cfg(feature = "std")]
    #[inline(always)]
| 5274 | fn write_to_io<W>(&self, mut dst: W) -> io::Result<()> |
| 5275 | where |
| 5276 | Self: Immutable, |
| 5277 | W: io::Write, |
| 5278 | { |
| 5279 | dst.write_all(self.as_bytes()) |
| 5280 | } |
| 5281 | |
    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
    #[doc(hidden)]
    #[inline]
| 5285 | fn as_bytes_mut(&mut self) -> &mut [u8] |
| 5286 | where |
| 5287 | Self: FromBytes, |
| 5288 | { |
| 5289 | self.as_mut_bytes() |
| 5290 | } |
| 5291 | } |
| 5292 | |
| 5293 | /// Analyzes whether a type is [`Unaligned`]. |
| 5294 | /// |
| 5295 | /// This derive analyzes, at compile time, whether the annotated type satisfies |
| 5296 | /// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is |
| 5297 | /// sound to do so. This derive can be applied to structs, enums, and unions; |
| 5298 | /// e.g.: |
| 5299 | /// |
| 5300 | /// ``` |
| 5301 | /// # use zerocopy_derive::Unaligned; |
| 5302 | /// #[derive(Unaligned)] |
| 5303 | /// #[repr(C)] |
| 5304 | /// struct MyStruct { |
| 5305 | /// # /* |
| 5306 | /// ... |
| 5307 | /// # */ |
| 5308 | /// } |
| 5309 | /// |
| 5310 | /// #[derive(Unaligned)] |
| 5311 | /// #[repr(u8)] |
| 5312 | /// enum MyEnum { |
| 5313 | /// # Variant0, |
| 5314 | /// # /* |
| 5315 | /// ... |
| 5316 | /// # */ |
| 5317 | /// } |
| 5318 | /// |
| 5319 | /// #[derive(Unaligned)] |
| 5320 | /// #[repr(packed)] |
| 5321 | /// union MyUnion { |
| 5322 | /// # variant: u8, |
| 5323 | /// # /* |
| 5324 | /// ... |
| 5325 | /// # */ |
| 5326 | /// } |
| 5327 | /// ``` |
| 5328 | /// |
| 5329 | /// # Analysis |
| 5330 | /// |
| 5331 | /// *This section describes, roughly, the analysis performed by this derive to |
| 5332 | /// determine whether it is sound to implement `Unaligned` for a given type. |
| 5333 | /// Unless you are modifying the implementation of this derive, or attempting to |
| 5334 | /// manually implement `Unaligned` for a type yourself, you don't need to read |
| 5335 | /// this section.* |
| 5336 | /// |
| 5337 | /// If a type has the following properties, then this derive can implement |
| 5338 | /// `Unaligned` for that type: |
| 5339 | /// |
/// - If the type is a struct or union:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
///     [`Unaligned`].
///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
///     `repr(packed)` or `repr(packed(1))`.
/// - If the type is an enum:
///   - If `repr(align(N))` is provided, `N` must equal 1.
///   - It must be a field-less enum (meaning that all variants have no fields).
///   - It must be `repr(i8)` or `repr(u8)`.
| 5350 | /// |
| 5351 | /// [safety conditions]: trait@Unaligned#safety |
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
| 5354 | pub use zerocopy_derive::Unaligned; |
| 5355 | |
| 5356 | /// Types with no alignment requirement. |
| 5357 | /// |
| 5358 | /// If `T: Unaligned`, then `align_of::<T>() == 1`. |
| 5359 | /// |
| 5360 | /// # Implementation |
| 5361 | /// |
| 5362 | /// **Do not implement this trait yourself!** Instead, use |
| 5363 | /// [`#[derive(Unaligned)]`][derive]; e.g.: |
| 5364 | /// |
| 5365 | /// ``` |
| 5366 | /// # use zerocopy_derive::Unaligned; |
| 5367 | /// #[derive(Unaligned)] |
| 5368 | /// #[repr(C)] |
| 5369 | /// struct MyStruct { |
| 5370 | /// # /* |
| 5371 | /// ... |
| 5372 | /// # */ |
| 5373 | /// } |
| 5374 | /// |
| 5375 | /// #[derive(Unaligned)] |
| 5376 | /// #[repr(u8)] |
| 5377 | /// enum MyEnum { |
| 5378 | /// # Variant0, |
| 5379 | /// # /* |
| 5380 | /// ... |
| 5381 | /// # */ |
| 5382 | /// } |
| 5383 | /// |
| 5384 | /// #[derive(Unaligned)] |
| 5385 | /// #[repr(packed)] |
| 5386 | /// union MyUnion { |
| 5387 | /// # variant: u8, |
| 5388 | /// # /* |
| 5389 | /// ... |
| 5390 | /// # */ |
| 5391 | /// } |
| 5392 | /// ``` |
| 5393 | /// |
| 5394 | /// This derive performs a sophisticated, compile-time safety analysis to |
| 5395 | /// determine whether a type is `Unaligned`. |
| 5396 | /// |
| 5397 | /// # Safety |
| 5398 | /// |
| 5399 | /// *This section describes what is required in order for `T: Unaligned`, and |
| 5400 | /// what unsafe code may assume of such types. If you don't plan on implementing |
| 5401 | /// `Unaligned` manually, and you don't plan on writing unsafe code that |
| 5402 | /// operates on `Unaligned` types, then you don't need to read this section.* |
| 5403 | /// |
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce
/// a reference to `T` at any memory location regardless of alignment. Marking
/// a type as `Unaligned` in violation of this contract may cause undefined
/// behavior.
| 5408 | /// |
| 5409 | /// `#[derive(Unaligned)]` only permits [types which satisfy these |
| 5410 | /// requirements][derive-analysis]. |
| 5411 | /// |
#[cfg_attr(
    feature = "derive",
    doc = "[derive]: zerocopy_derive::Unaligned",
    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
)]
#[cfg_attr(
    not(feature = "derive"),
    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
)]
#[cfg_attr(
    zerocopy_diagnostic_on_unimplemented_1_78_0,
    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
)]
| 5426 | pub unsafe trait Unaligned { |
| 5427 | // The `Self: Sized` bound makes it so that `Unaligned` is still object |
| 5428 | // safe. |
| 5429 | #[doc (hidden)] |
| 5430 | fn only_derive_is_allowed_to_implement_this_trait() |
| 5431 | where |
| 5432 | Self: Sized; |
| 5433 | } |
| 5434 | |
| 5435 | /// Derives an optimized implementation of [`Hash`] for types that implement |
| 5436 | /// [`IntoBytes`] and [`Immutable`]. |
| 5437 | /// |
| 5438 | /// The standard library's derive for `Hash` generates a recursive descent |
| 5439 | /// into the fields of the type it is applied to. Instead, the implementation |
| 5440 | /// derived by this macro makes a single call to [`Hasher::write()`] for both |
| 5441 | /// [`Hash::hash()`] and [`Hash::hash_slice()`], feeding the hasher the bytes |
| 5442 | /// of the type or slice all at once. |
| 5443 | /// |
| 5444 | /// [`Hash`]: core::hash::Hash |
| 5445 | /// [`Hash::hash()`]: core::hash::Hash::hash() |
| 5446 | /// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice() |
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
| 5449 | pub use zerocopy_derive::ByteHash; |
| 5450 | |
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
| 5454 | mod alloc_support { |
| 5455 | use super::*; |
| 5456 | |
| 5457 | /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the |
| 5458 | /// vector. The new items are initialized with zeros. |
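    ///
    /// A brief sketch of the replacement API on `FromZeros`, which this
    /// deprecated wrapper forwards to:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![1u8, 2, 3];
    /// <u8 as FromZeros>::extend_vec_zeroed(&mut v, 2).unwrap();
    /// assert_eq!(&*v, &[1, 2, 3, 0, 0]);
    /// ```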
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
| 5463 | pub fn extend_vec_zeroed<T: FromZeros>( |
| 5464 | v: &mut Vec<T>, |
| 5465 | additional: usize, |
| 5466 | ) -> Result<(), AllocError> { |
| 5467 | <T as FromZeros>::extend_vec_zeroed(v, additional) |
| 5468 | } |
| 5469 | |
| 5470 | /// Inserts `additional` new items into `Vec<T>` at `position`. The new |
| 5471 | /// items are initialized with zeros. |
| 5472 | /// |
| 5473 | /// # Panics |
| 5474 | /// |
| 5475 | /// Panics if `position > v.len()`. |
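    ///
    /// A brief sketch of the replacement API on `FromZeros`, which this
    /// deprecated wrapper forwards to:
    ///
    /// ```
    /// # use zerocopy::FromZeros;
    /// let mut v = vec![1u8, 2, 3];
    /// <u8 as FromZeros>::insert_vec_zeroed(&mut v, 1, 2).unwrap();
    /// assert_eq!(&*v, &[1, 0, 0, 2, 3]);
    /// ```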
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
| 5480 | pub fn insert_vec_zeroed<T: FromZeros>( |
| 5481 | v: &mut Vec<T>, |
| 5482 | position: usize, |
| 5483 | additional: usize, |
| 5484 | ) -> Result<(), AllocError> { |
| 5485 | <T as FromZeros>::insert_vec_zeroed(v, position, additional) |
| 5486 | } |
| 5487 | } |
| 5488 | |
#[cfg(feature = "alloc")]
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
#[doc(hidden)]
| 5492 | pub use alloc_support::*; |
| 5493 | |
#[cfg(test)]
#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
| 5496 | mod tests { |
| 5497 | use static_assertions::assert_impl_all; |
| 5498 | |
| 5499 | use super::*; |
| 5500 | use crate::util::testutil::*; |
| 5501 | |
| 5502 | // An unsized type. |
| 5503 | // |
| 5504 | // This is used to test the custom derives of our traits. The `[u8]` type |
| 5505 | // gets a hand-rolled impl, so it doesn't exercise our custom derives. |
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
| 5508 | struct Unsized([u8]); |
| 5509 | |
| 5510 | impl Unsized { |
| 5511 | fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized { |
            // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
| 5513 | // `Unsized` are the same, so are the layouts of `&mut [u8]` and |
| 5514 | // `&mut Unsized`. [1] Even if it turns out that this isn't actually |
| 5515 | // guaranteed by the language spec, we can just change this since |
| 5516 | // it's in test code. |
| 5517 | // |
| 5518 | // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375 |
| 5519 | unsafe { mem::transmute(slc) } |
| 5520 | } |
| 5521 | } |
| 5522 | |
| 5523 | #[test ] |
| 5524 | fn test_known_layout() { |
| 5525 | // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout. |
| 5526 | // Test that `PhantomData<$ty>` has the same layout as `()` regardless |
| 5527 | // of `$ty`. |
| 5528 | macro_rules! test { |
| 5529 | ($ty:ty, $expect:expr) => { |
| 5530 | let expect = $expect; |
| 5531 | assert_eq!(<$ty as KnownLayout>::LAYOUT, expect); |
| 5532 | assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect); |
| 5533 | assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT); |
| 5534 | }; |
| 5535 | } |
| 5536 | |
| 5537 | let layout = |offset, align, _trailing_slice_elem_size| DstLayout { |
| 5538 | align: NonZeroUsize::new(align).unwrap(), |
| 5539 | size_info: match _trailing_slice_elem_size { |
| 5540 | None => SizeInfo::Sized { size: offset }, |
| 5541 | Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), |
| 5542 | }, |
| 5543 | }; |
| 5544 | |
        test!((), layout(0, 1, None));
        test!(u8, layout(1, 1, None));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None));
        test!(AU64, layout(8, 8, None));

        test!(Option<&'static ()>, usize::LAYOUT);

        test!([()], layout(0, 1, Some(0)));
        test!([u8], layout(0, 1, Some(1)));
        test!(str, layout(0, 1, Some(1)));
| 5557 | } |
| 5558 | |
| 5559 | #[cfg (feature = "derive" )] |
| 5560 | #[test ] |
| 5561 | fn test_known_layout_derive() { |
| 5562 | // In this and other files (`late_compile_pass.rs`, |
| 5563 | // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure |
| 5564 | // modes of `derive(KnownLayout)` for the following combination of |
| 5565 | // properties: |
| 5566 | // |
| 5567 | // +------------+--------------------------------------+-----------+ |
| 5568 | // | | trailing field properties | | |
| 5569 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5570 | // |------------+----------+----------------+----------+-----------| |
| 5571 | // | N | N | N | N | KL00 | |
| 5572 | // | N | N | N | Y | KL01 | |
| 5573 | // | N | N | Y | N | KL02 | |
| 5574 | // | N | N | Y | Y | KL03 | |
| 5575 | // | N | Y | N | N | KL04 | |
| 5576 | // | N | Y | N | Y | KL05 | |
| 5577 | // | N | Y | Y | N | KL06 | |
| 5578 | // | N | Y | Y | Y | KL07 | |
| 5579 | // | Y | N | N | N | KL08 | |
| 5580 | // | Y | N | N | Y | KL09 | |
| 5581 | // | Y | N | Y | N | KL10 | |
| 5582 | // | Y | N | Y | Y | KL11 | |
| 5583 | // | Y | Y | N | N | KL12 | |
| 5584 | // | Y | Y | N | Y | KL13 | |
| 5585 | // | Y | Y | Y | N | KL14 | |
| 5586 | // | Y | Y | Y | Y | KL15 | |
| 5587 | // +------------+----------+----------------+----------+-----------+ |
| 5588 | |
| 5589 | struct NotKnownLayout<T = ()> { |
| 5590 | _t: T, |
| 5591 | } |
| 5592 | |
| 5593 | #[derive (KnownLayout)] |
| 5594 | #[repr (C)] |
| 5595 | struct AlignSize<const ALIGN: usize, const SIZE: usize> |
| 5596 | where |
| 5597 | elain::Align<ALIGN>: elain::Alignment, |
| 5598 | { |
| 5599 | _align: elain::Align<ALIGN>, |
| 5600 | size: [u8; SIZE], |
| 5601 | } |
| 5602 | |
| 5603 | type AU16 = AlignSize<2, 2>; |
| 5604 | type AU32 = AlignSize<4, 4>; |
| 5605 | |
| 5606 | fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {} |
| 5607 | |
| 5608 | let sized_layout = |align, size| DstLayout { |
| 5609 | align: NonZeroUsize::new(align).unwrap(), |
| 5610 | size_info: SizeInfo::Sized { size }, |
| 5611 | }; |
| 5612 | |
| 5613 | let unsized_layout = |align, elem_size, offset| DstLayout { |
| 5614 | align: NonZeroUsize::new(align).unwrap(), |
| 5615 | size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), |
| 5616 | }; |
| 5617 | |
| 5618 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5619 | // | N | N | N | Y | KL01 | |
| 5620 | #[allow (dead_code)] |
| 5621 | #[derive (KnownLayout)] |
| 5622 | struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
| 5623 | |
| 5624 | let expected = DstLayout::for_type::<KL01>(); |
| 5625 | |
| 5626 | assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected); |
| 5627 | assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8)); |
| 5628 | |
| 5629 | // ...with `align(N)`: |
| 5630 | #[allow (dead_code)] |
| 5631 | #[derive (KnownLayout)] |
| 5632 | #[repr (align(64))] |
| 5633 | struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
| 5634 | |
| 5635 | let expected = DstLayout::for_type::<KL01Align>(); |
| 5636 | |
| 5637 | assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected); |
| 5638 | assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
| 5639 | |
| 5640 | // ...with `packed`: |
| 5641 | #[allow (dead_code)] |
| 5642 | #[derive (KnownLayout)] |
| 5643 | #[repr (packed)] |
| 5644 | struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
| 5645 | |
| 5646 | let expected = DstLayout::for_type::<KL01Packed>(); |
| 5647 | |
| 5648 | assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected); |
| 5649 | assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6)); |
| 5650 | |
| 5651 | // ...with `packed(N)`: |
| 5652 | #[allow (dead_code)] |
| 5653 | #[derive (KnownLayout)] |
| 5654 | #[repr (packed(2))] |
| 5655 | struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>); |
| 5656 | |
| 5657 | assert_impl_all!(KL01PackedN: KnownLayout); |
| 5658 | |
| 5659 | let expected = DstLayout::for_type::<KL01PackedN>(); |
| 5660 | |
| 5661 | assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected); |
| 5662 | assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6)); |
| 5663 | |
| 5664 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5665 | // | N | N | Y | Y | KL03 | |
| 5666 | #[allow (dead_code)] |
| 5667 | #[derive (KnownLayout)] |
| 5668 | struct KL03(NotKnownLayout, u8); |
| 5669 | |
| 5670 | let expected = DstLayout::for_type::<KL03>(); |
| 5671 | |
| 5672 | assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected); |
| 5673 | assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
| 5674 | |
| 5675 | // ... with `align(N)` |
| 5676 | #[allow (dead_code)] |
| 5677 | #[derive (KnownLayout)] |
| 5678 | #[repr (align(64))] |
| 5679 | struct KL03Align(NotKnownLayout<AU32>, u8); |
| 5680 | |
| 5681 | let expected = DstLayout::for_type::<KL03Align>(); |
| 5682 | |
| 5683 | assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected); |
| 5684 | assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
| 5685 | |
| 5686 | // ... with `packed`: |
| 5687 | #[allow (dead_code)] |
| 5688 | #[derive (KnownLayout)] |
| 5689 | #[repr (packed)] |
| 5690 | struct KL03Packed(NotKnownLayout<AU32>, u8); |
| 5691 | |
| 5692 | let expected = DstLayout::for_type::<KL03Packed>(); |
| 5693 | |
| 5694 | assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected); |
| 5695 | assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5)); |
| 5696 | |
| 5697 | // ... with `packed(N)` |
| 5698 | #[allow (dead_code)] |
| 5699 | #[derive (KnownLayout)] |
| 5700 | #[repr (packed(2))] |
| 5701 | struct KL03PackedN(NotKnownLayout<AU32>, u8); |
| 5702 | |
| 5703 | assert_impl_all!(KL03PackedN: KnownLayout); |
| 5704 | |
| 5705 | let expected = DstLayout::for_type::<KL03PackedN>(); |
| 5706 | |
| 5707 | assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected); |
| 5708 | assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6)); |
| 5709 | |
| 5710 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5711 | // | N | Y | N | Y | KL05 | |
| 5712 | #[allow (dead_code)] |
| 5713 | #[derive (KnownLayout)] |
| 5714 | struct KL05<T>(u8, T); |
| 5715 | |
| 5716 | fn _test_kl05<T>(t: T) -> impl KnownLayout { |
| 5717 | KL05(0u8, t) |
| 5718 | } |
| 5719 | |
| 5720 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5721 | // | N | Y | Y | Y | KL07 | |
| 5722 | #[allow (dead_code)] |
| 5723 | #[derive (KnownLayout)] |
| 5724 | struct KL07<T: KnownLayout>(u8, T); |
| 5725 | |
| 5726 | fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout { |
| 5727 | let _ = KL07(0u8, t); |
| 5728 | } |
| 5729 | |
| 5730 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5731 | // | Y | N | Y | N | KL10 | |
| 5732 | #[allow (dead_code)] |
| 5733 | #[derive (KnownLayout)] |
| 5734 | #[repr (C)] |
| 5735 | struct KL10(NotKnownLayout<AU32>, [u8]); |
| 5736 | |
| 5737 | let expected = DstLayout::new_zst(None) |
| 5738 | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None) |
| 5739 | .extend(<[u8] as KnownLayout>::LAYOUT, None) |
| 5740 | .pad_to_align(); |
| 5741 | |
| 5742 | assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected); |
| 5743 | assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4)); |
| 5744 | |
| 5745 | // ...with `align(N)`: |
| 5746 | #[allow (dead_code)] |
| 5747 | #[derive (KnownLayout)] |
| 5748 | #[repr (C, align(64))] |
| 5749 | struct KL10Align(NotKnownLayout<AU32>, [u8]); |
| 5750 | |
| 5751 | let repr_align = NonZeroUsize::new(64); |
| 5752 | |
| 5753 | let expected = DstLayout::new_zst(repr_align) |
| 5754 | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None) |
| 5755 | .extend(<[u8] as KnownLayout>::LAYOUT, None) |
| 5756 | .pad_to_align(); |
| 5757 | |
| 5758 | assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected); |
| 5759 | assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4)); |
| 5760 | |
| 5761 | // ...with `packed`: |
| 5762 | #[allow (dead_code)] |
| 5763 | #[derive (KnownLayout)] |
| 5764 | #[repr (C, packed)] |
| 5765 | struct KL10Packed(NotKnownLayout<AU32>, [u8]); |
| 5766 | |
| 5767 | let repr_packed = NonZeroUsize::new(1); |
| 5768 | |
| 5769 | let expected = DstLayout::new_zst(None) |
| 5770 | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed) |
| 5771 | .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) |
| 5772 | .pad_to_align(); |
| 5773 | |
| 5774 | assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected); |
| 5775 | assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4)); |
| 5776 | |
| 5777 | // ...with `packed(N)`: |
| 5778 | #[allow (dead_code)] |
| 5779 | #[derive (KnownLayout)] |
| 5780 | #[repr (C, packed(2))] |
| 5781 | struct KL10PackedN(NotKnownLayout<AU32>, [u8]); |
| 5782 | |
| 5783 | let repr_packed = NonZeroUsize::new(2); |
| 5784 | |
| 5785 | let expected = DstLayout::new_zst(None) |
| 5786 | .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed) |
| 5787 | .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) |
| 5788 | .pad_to_align(); |
| 5789 | |
| 5790 | assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected); |
| 5791 | assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4)); |
| 5792 | |
| 5793 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5794 | // | Y | N | Y | Y | KL11 | |
| 5795 | #[allow (dead_code)] |
| 5796 | #[derive (KnownLayout)] |
| 5797 | #[repr (C)] |
| 5798 | struct KL11(NotKnownLayout<AU64>, u8); |
| 5799 | |
| 5800 | let expected = DstLayout::new_zst(None) |
| 5801 | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None) |
| 5802 | .extend(<u8 as KnownLayout>::LAYOUT, None) |
| 5803 | .pad_to_align(); |
| 5804 | |
| 5805 | assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected); |
| 5806 | assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16)); |
| 5807 | |
| 5808 | // ...with `align(N)`: |
| 5809 | #[allow (dead_code)] |
| 5810 | #[derive (KnownLayout)] |
| 5811 | #[repr (C, align(64))] |
| 5812 | struct KL11Align(NotKnownLayout<AU64>, u8); |
| 5813 | |
| 5814 | let repr_align = NonZeroUsize::new(64); |
| 5815 | |
| 5816 | let expected = DstLayout::new_zst(repr_align) |
| 5817 | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None) |
| 5818 | .extend(<u8 as KnownLayout>::LAYOUT, None) |
| 5819 | .pad_to_align(); |
| 5820 | |
| 5821 | assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected); |
| 5822 | assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64)); |
| 5823 | |
| 5824 | // ...with `packed`: |
| 5825 | #[allow (dead_code)] |
| 5826 | #[derive (KnownLayout)] |
| 5827 | #[repr (C, packed)] |
| 5828 | struct KL11Packed(NotKnownLayout<AU64>, u8); |
| 5829 | |
| 5830 | let repr_packed = NonZeroUsize::new(1); |
| 5831 | |
| 5832 | let expected = DstLayout::new_zst(None) |
| 5833 | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed) |
| 5834 | .extend(<u8 as KnownLayout>::LAYOUT, repr_packed) |
| 5835 | .pad_to_align(); |
| 5836 | |
| 5837 | assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected); |
| 5838 | assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9)); |
| 5839 | |
| 5840 | // ...with `packed(N)`: |
| 5841 | #[allow (dead_code)] |
| 5842 | #[derive (KnownLayout)] |
| 5843 | #[repr (C, packed(2))] |
| 5844 | struct KL11PackedN(NotKnownLayout<AU64>, u8); |
| 5845 | |
| 5846 | let repr_packed = NonZeroUsize::new(2); |
| 5847 | |
| 5848 | let expected = DstLayout::new_zst(None) |
| 5849 | .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed) |
| 5850 | .extend(<u8 as KnownLayout>::LAYOUT, repr_packed) |
| 5851 | .pad_to_align(); |
| 5852 | |
| 5853 | assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected); |
| 5854 | assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10)); |
| 5855 | |
| 5856 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5857 | // | Y | Y | Y | N | KL14 | |
| 5858 | #[allow (dead_code)] |
| 5859 | #[derive (KnownLayout)] |
| 5860 | #[repr (C)] |
| 5861 | struct KL14<T: ?Sized + KnownLayout>(u8, T); |
| 5862 | |
| 5863 | fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) { |
| 5864 | _assert_kl(kl) |
| 5865 | } |
| 5866 | |
| 5867 | // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | |
| 5868 | // | Y | Y | Y | Y | KL15 | |
| 5869 | #[allow (dead_code)] |
| 5870 | #[derive (KnownLayout)] |
| 5871 | #[repr (C)] |
| 5872 | struct KL15<T: KnownLayout>(u8, T); |
| 5873 | |
| 5874 | fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout { |
| 5875 | let _ = KL15(0u8, t); |
| 5876 | } |
| 5877 | |
| 5878 | // Test a variety of combinations of field types: |
| 5879 | // - () |
| 5880 | // - u8 |
| 5881 | // - AU16 |
| 5882 | // - [()] |
| 5883 | // - [u8] |
| 5884 | // - [AU16] |
| 5885 | |
| 5886 | #[allow (clippy::upper_case_acronyms, dead_code)] |
| 5887 | #[derive (KnownLayout)] |
| 5888 | #[repr (C)] |
| 5889 | struct KLTU<T, U: ?Sized>(T, U); |
| 5890 | |
| 5891 | assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0)); |
| 5892 | |
| 5893 | assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
| 5894 | |
| 5895 | assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2)); |
| 5896 | |
| 5897 | assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0)); |
| 5898 | |
| 5899 | assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0)); |
| 5900 | |
| 5901 | assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0)); |
| 5902 | |
| 5903 | assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1)); |
| 5904 | |
| 5905 | assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2)); |
| 5906 | |
| 5907 | assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
| 5908 | |
| 5909 | assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1)); |
| 5910 | |
| 5911 | assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1)); |
| 5912 | |
| 5913 | assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); |
| 5914 | |
| 5915 | assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2)); |
| 5916 | |
| 5917 | assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
| 5918 | |
| 5919 | assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4)); |
| 5920 | |
| 5921 | assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2)); |
| 5922 | |
| 5923 | assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2)); |
| 5924 | |
| 5925 | assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); |
| 5926 | |
| 5927 | // Test a variety of field counts. |
| 5928 | |
| 5929 | #[derive (KnownLayout)] |
| 5930 | #[repr (C)] |
| 5931 | struct KLF0; |
| 5932 | |
| 5933 | assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0)); |
| 5934 | |
| 5935 | #[derive (KnownLayout)] |
| 5936 | #[repr (C)] |
| 5937 | struct KLF1([u8]); |
| 5938 | |
| 5939 | assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0)); |
| 5940 | |
| 5941 | #[derive (KnownLayout)] |
| 5942 | #[repr (C)] |
| 5943 | struct KLF2(NotKnownLayout<u8>, [u8]); |
| 5944 | |
| 5945 | assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1)); |
| 5946 | |
| 5947 | #[derive (KnownLayout)] |
| 5948 | #[repr (C)] |
| 5949 | struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]); |
| 5950 | |
| 5951 | assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4)); |
| 5952 | |
| 5953 | #[derive (KnownLayout)] |
| 5954 | #[repr (C)] |
| 5955 | struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]); |
| 5956 | |
| 5957 | assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8)); |
| 5958 | } |
| 5959 | |
| 5960 | #[test ] |
| 5961 | fn test_object_safety() { |
| 5962 | fn _takes_no_cell(_: &dyn Immutable) {} |
| 5963 | fn _takes_unaligned(_: &dyn Unaligned) {} |
| 5964 | } |
| 5965 | |
| 5966 | #[test ] |
| 5967 | fn test_from_zeros_only() { |
| 5968 | // Test types that implement `FromZeros` but not `FromBytes`. |
| 5969 | |
| 5970 | assert!(!bool::new_zeroed()); |
        assert_eq!(char::new_zeroed(), '\0');
| 5972 | |
| 5973 | #[cfg (feature = "alloc" )] |
| 5974 | { |
| 5975 | assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false))); |
            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
| 5977 | |
| 5978 | assert_eq!( |
| 5979 | <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(), |
| 5980 | [false, false, false] |
| 5981 | ); |
| 5982 | assert_eq!( |
| 5983 | <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(), |
| 5984 | [' \0' , ' \0' , ' \0' ] |
| 5985 | ); |
| 5986 | |
| 5987 | assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]); |
            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
| 5989 | } |
| 5990 | |
        let mut string = "hello".to_string();
        let s: &mut str = string.as_mut();
        assert_eq!(s, "hello");
        s.zero();
        assert_eq!(s, "\0\0\0\0\0");
| 5996 | } |
| 5997 | |
| 5998 | #[test ] |
| 5999 | fn test_zst_count_preserved() { |
        // Test that, when an explicit count is provided for a type with a
| 6001 | // ZST trailing slice element, that count is preserved. This is |
| 6002 | // important since, for such types, all element counts result in objects |
| 6003 | // of the same size, and so the correct behavior is ambiguous. However, |
| 6004 | // preserving the count as requested by the user is the behavior that we |
| 6005 | // document publicly. |
| 6006 | |
| 6007 | // FromZeros methods |
| 6008 | #[cfg (feature = "alloc" )] |
| 6009 | assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3); |
| 6010 | #[cfg (feature = "alloc" )] |
| 6011 | assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3); |
| 6012 | |
| 6013 | // FromBytes methods |
| 6014 | assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3); |
| 6015 | assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3); |
| 6016 | assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3); |
| 6017 | assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3); |
| 6018 | assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3); |
| 6019 | assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3); |
| 6020 | } |
| 6021 | |
| 6022 | #[test ] |
| 6023 | fn test_read_write() { |
| 6024 | const VAL: u64 = 0x12345678; |
| 6025 | #[cfg (target_endian = "big" )] |
| 6026 | const VAL_BYTES: [u8; 8] = VAL.to_be_bytes(); |
| 6027 | #[cfg (target_endian = "little" )] |
| 6028 | const VAL_BYTES: [u8; 8] = VAL.to_le_bytes(); |
| 6029 | const ZEROS: [u8; 8] = [0u8; 8]; |
| 6030 | |
| 6031 | // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`. |
| 6032 | |
| 6033 | assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL)); |
| 6034 | // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all |
| 6035 | // zeros. |
| 6036 | let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); |
| 6037 | assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..]))); |
| 6038 | assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0))); |
| 6039 | // The first 8 bytes are all zeros and the second 8 bytes are from |
| 6040 | // `VAL_BYTES` |
| 6041 | let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); |
| 6042 | assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..]))); |
| 6043 | assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL))); |
| 6044 | |
| 6045 | // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`. |
| 6046 | |
| 6047 | let mut bytes = [0u8; 8]; |
| 6048 | assert_eq!(VAL.write_to(&mut bytes[..]), Ok(())); |
| 6049 | assert_eq!(bytes, VAL_BYTES); |
| 6050 | let mut bytes = [0u8; 16]; |
| 6051 | assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(())); |
| 6052 | let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); |
| 6053 | assert_eq!(bytes, want); |
| 6054 | let mut bytes = [0u8; 16]; |
| 6055 | assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(())); |
| 6056 | let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); |
| 6057 | assert_eq!(bytes, want); |
| 6058 | } |
| 6059 | |
| 6060 | #[test ] |
| 6061 | #[cfg (feature = "std" )] |
| 6062 | fn test_read_write_io() { |
| 6063 | let mut long_buffer = [0, 0, 0, 0]; |
| 6064 | assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(()))); |
| 6065 | assert_eq!(long_buffer, [255, 255, 0, 0]); |
| 6066 | assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX))); |
| 6067 | |
| 6068 | let mut short_buffer = [0, 0]; |
| 6069 | assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err()); |
| 6070 | assert_eq!(short_buffer, [255, 255]); |
| 6071 | assert!(u32::read_from_io(&short_buffer[..]).is_err()); |
| 6072 | } |
| 6073 | |
| 6074 | #[test ] |
| 6075 | fn test_try_from_bytes_try_read_from() { |
| 6076 | assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false)); |
| 6077 | assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true)); |
| 6078 | |
| 6079 | assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..]))); |
| 6080 | assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..]))); |
| 6081 | |
| 6082 | assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false))); |
| 6083 | assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true))); |
| 6084 | |
| 6085 | // If we don't pass enough bytes, it fails. |
| 6086 | assert!(matches!( |
| 6087 | <u8 as TryFromBytes>::try_read_from_bytes(&[]), |
| 6088 | Err(TryReadError::Size(_)) |
| 6089 | )); |
| 6090 | assert!(matches!( |
| 6091 | <u8 as TryFromBytes>::try_read_from_prefix(&[]), |
| 6092 | Err(TryReadError::Size(_)) |
| 6093 | )); |
| 6094 | assert!(matches!( |
| 6095 | <u8 as TryFromBytes>::try_read_from_suffix(&[]), |
| 6096 | Err(TryReadError::Size(_)) |
| 6097 | )); |
| 6098 | |
| 6099 | // If we pass too many bytes, it fails. |
| 6100 | assert!(matches!( |
| 6101 | <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]), |
| 6102 | Err(TryReadError::Size(_)) |
| 6103 | )); |
| 6104 | |
| 6105 | // If we pass an invalid value, it fails. |
| 6106 | assert!(matches!( |
| 6107 | <bool as TryFromBytes>::try_read_from_bytes(&[2]), |
| 6108 | Err(TryReadError::Validity(_)) |
| 6109 | )); |
| 6110 | assert!(matches!( |
| 6111 | <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]), |
| 6112 | Err(TryReadError::Validity(_)) |
| 6113 | )); |
| 6114 | assert!(matches!( |
| 6115 | <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]), |
| 6116 | Err(TryReadError::Validity(_)) |
| 6117 | )); |
| 6118 | |
| 6119 | // Reading from a misaligned buffer should still succeed. Since `AU64`'s |
| 6120 | // alignment is 8, and since we read from two adjacent addresses one |
| 6121 | // byte apart, it is guaranteed that at least one of them (though |
| 6122 | // possibly both) will be misaligned. |
| 6123 | let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0]; |
| 6124 | assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0))); |
| 6125 | assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0))); |
| 6126 | |
| 6127 | assert_eq!( |
| 6128 | <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]), |
| 6129 | Ok((AU64(0), &[][..])) |
| 6130 | ); |
| 6131 | assert_eq!( |
| 6132 | <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]), |
| 6133 | Ok((AU64(0), &[][..])) |
| 6134 | ); |
| 6135 | |
| 6136 | assert_eq!( |
| 6137 | <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]), |
| 6138 | Ok((&[][..], AU64(0))) |
| 6139 | ); |
| 6140 | assert_eq!( |
| 6141 | <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]), |
| 6142 | Ok((&[][..], AU64(0))) |
| 6143 | ); |
| 6144 | } |
| 6145 | |
| 6146 | #[test ] |
| 6147 | fn test_ref_from_mut_from() { |
        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
        // Exhaustive coverage for these methods is provided by the `Ref` tests
        // above, which these helper methods defer to.
| 6151 | |
| 6152 | let mut buf = |
| 6153 | Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); |
| 6154 | |
| 6155 | assert_eq!( |
| 6156 | AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(), |
| 6157 | [8, 9, 10, 11, 12, 13, 14, 15] |
| 6158 | ); |
| 6159 | let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap(); |
| 6160 | suffix.0 = 0x0101010101010101; |
        // `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
| 6163 | assert_eq!( |
| 6164 | <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), |
| 6165 | (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]) |
| 6166 | ); |
| 6167 | let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); |
| 6168 | assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]); |
| 6169 | suffix.0 = 0x0202020202020202; |
| 6170 | let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap(); |
| 6171 | assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]); |
| 6172 | suffix[0] = 42; |
| 6173 | assert_eq!( |
| 6174 | <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), |
| 6175 | (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..]) |
| 6176 | ); |
| 6177 | <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30; |
| 6178 | assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); |
| 6179 | } |
| 6180 | |
| 6181 | #[test ] |
| 6182 | fn test_ref_from_mut_from_error() { |
        // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
| 6184 | |
| 6185 | // Fail because the buffer is too large. |
| 6186 | let mut buf = Align::<[u8; 16], AU64>::default(); |
| 6187 | // `buf.t` should be aligned to 8, so only the length check should fail. |
| 6188 | assert!(AU64::ref_from_bytes(&buf.t[..]).is_err()); |
| 6189 | assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err()); |
| 6190 | assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err()); |
| 6191 | assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err()); |
| 6192 | |
| 6193 | // Fail because the buffer is too small. |
| 6194 | let mut buf = Align::<[u8; 4], AU64>::default(); |
| 6195 | assert!(AU64::ref_from_bytes(&buf.t[..]).is_err()); |
| 6196 | assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err()); |
| 6197 | assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err()); |
| 6198 | assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err()); |
| 6199 | assert!(AU64::ref_from_prefix(&buf.t[..]).is_err()); |
| 6200 | assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err()); |
| 6201 | assert!(AU64::ref_from_suffix(&buf.t[..]).is_err()); |
| 6202 | assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err()); |
| 6203 | assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err()); |
| 6204 | assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err()); |
| 6205 | assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err()); |
| 6206 | assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err()); |
| 6207 | |
| 6208 | // Fail because the alignment is insufficient. |
| 6209 | let mut buf = Align::<[u8; 13], AU64>::default(); |
| 6210 | assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err()); |
| 6211 | assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err()); |
| 6214 | assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err()); |
| 6215 | assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err()); |
| 6216 | assert!(AU64::ref_from_suffix(&buf.t[..]).is_err()); |
| 6217 | assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err()); |
| 6218 | } |
| 6219 | |
| 6220 | #[test ] |
| 6221 | fn test_to_methods() { |
| 6222 | /// Run a series of tests by calling `IntoBytes` methods on `t`. |
| 6223 | /// |
| 6224 | /// `bytes` is the expected byte sequence returned from `t.as_bytes()` |
| 6225 | /// before `t` has been modified. `post_mutation` is the expected |
| 6226 | /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]` |
| 6227 | /// has had its bits flipped (by applying `^= 0xFF`). |
| 6228 | /// |
| 6229 | /// `N` is the size of `t` in bytes. |
| 6230 | fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>( |
| 6231 | t: &mut T, |
| 6232 | bytes: &[u8], |
| 6233 | post_mutation: &T, |
| 6234 | ) { |
| 6235 | // Test that we can access the underlying bytes, and that we get the |
| 6236 | // right bytes and the right number of bytes. |
| 6237 | assert_eq!(t.as_bytes(), bytes); |
| 6238 | |
| 6239 | // Test that changes to the underlying byte slices are reflected in |
| 6240 | // the original object. |
| 6241 | t.as_mut_bytes()[0] ^= 0xFF; |
| 6242 | assert_eq!(t, post_mutation); |
| 6243 | t.as_mut_bytes()[0] ^= 0xFF; |
| 6244 | |
| 6245 | // `write_to` rejects slices that are too small or too large. |
| 6246 | assert!(t.write_to(&mut vec![0; N - 1][..]).is_err()); |
| 6247 | assert!(t.write_to(&mut vec![0; N + 1][..]).is_err()); |
| 6248 | |
| 6249 | // `write_to` works as expected. |
| 6250 | let mut bytes = [0; N]; |
| 6251 | assert_eq!(t.write_to(&mut bytes[..]), Ok(())); |
| 6252 | assert_eq!(bytes, t.as_bytes()); |
| 6253 | |
| 6254 | // `write_to_prefix` rejects slices that are too small. |
| 6255 | assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err()); |
| 6256 | |
| 6257 | // `write_to_prefix` works with exact-sized slices. |
| 6258 | let mut bytes = [0; N]; |
| 6259 | assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(())); |
| 6260 | assert_eq!(bytes, t.as_bytes()); |
| 6261 | |
| 6262 | // `write_to_prefix` works with too-large slices, and any bytes past |
| 6263 | // the prefix aren't modified. |
| 6264 | let mut too_many_bytes = vec![0; N + 1]; |
| 6265 | too_many_bytes[N] = 123; |
| 6266 | assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(())); |
| 6267 | assert_eq!(&too_many_bytes[..N], t.as_bytes()); |
| 6268 | assert_eq!(too_many_bytes[N], 123); |
| 6269 | |
| 6270 | // `write_to_suffix` rejects slices that are too small. |
| 6271 | assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err()); |
| 6272 | |
| 6273 | // `write_to_suffix` works with exact-sized slices. |
| 6274 | let mut bytes = [0; N]; |
| 6275 | assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(())); |
| 6276 | assert_eq!(bytes, t.as_bytes()); |
| 6277 | |
| 6278 | // `write_to_suffix` works with too-large slices, and any bytes |
| 6279 | // before the suffix aren't modified. |
| 6280 | let mut too_many_bytes = vec![0; N + 1]; |
| 6281 | too_many_bytes[0] = 123; |
| 6282 | assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(())); |
| 6283 | assert_eq!(&too_many_bytes[1..], t.as_bytes()); |
| 6284 | assert_eq!(too_many_bytes[0], 123); |
| 6285 | } |
| 6286 | |
| 6287 | #[derive (Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)] |
| 6288 | #[repr (C)] |
| 6289 | struct Foo { |
| 6290 | a: u32, |
| 6291 | b: Wrapping<u32>, |
| 6292 | c: Option<NonZeroU32>, |
| 6293 | } |
| 6294 | |
| 6295 | let expected_bytes: Vec<u8> = if cfg!(target_endian = "little" ) { |
| 6296 | vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0] |
| 6297 | } else { |
| 6298 | vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0] |
| 6299 | }; |
| 6300 | let post_mutation_expected_a = |
| 6301 | if cfg!(target_endian = "little" ) { 0x00_00_00_FE } else { 0xFF_00_00_01 }; |
        test::<_, 12>(
| 6303 | &mut Foo { a: 1, b: Wrapping(2), c: None }, |
| 6304 | expected_bytes.as_bytes(), |
| 6305 | &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None }, |
| 6306 | ); |
        test::<_, 3>(
| 6308 | Unsized::from_mut_slice(&mut [1, 2, 3]), |
| 6309 | &[1, 2, 3], |
| 6310 | Unsized::from_mut_slice(&mut [0xFE, 2, 3]), |
| 6311 | ); |
| 6312 | } |
| 6313 | |
| 6314 | #[test ] |
| 6315 | fn test_array() { |
| 6316 | #[derive (FromBytes, IntoBytes, Immutable)] |
| 6317 | #[repr (C)] |
| 6318 | struct Foo { |
| 6319 | a: [u16; 33], |
| 6320 | } |
| 6321 | |
| 6322 | let foo = Foo { a: [0xFFFF; 33] }; |
| 6323 | let expected = [0xFFu8; 66]; |
| 6324 | assert_eq!(foo.as_bytes(), &expected[..]); |
| 6325 | } |
| 6326 | |
| 6327 | #[test ] |
| 6328 | fn test_new_zeroed() { |
| 6329 | assert!(!bool::new_zeroed()); |
| 6330 | assert_eq!(u64::new_zeroed(), 0); |
| 6331 | // This test exists in order to exercise unsafe code, especially when |
| 6332 | // running under Miri. |
| 6333 | #[allow (clippy::unit_cmp)] |
| 6334 | { |
| 6335 | assert_eq!(<()>::new_zeroed(), ()); |
| 6336 | } |
| 6337 | } |
| 6338 | |
| 6339 | #[test ] |
| 6340 | fn test_transparent_packed_generic_struct() { |
| 6341 | #[derive (IntoBytes, FromBytes, Unaligned)] |
| 6342 | #[repr (transparent)] |
| 6343 | #[allow (dead_code)] // We never construct this type |
| 6344 | struct Foo<T> { |
| 6345 | _t: T, |
| 6346 | _phantom: PhantomData<()>, |
| 6347 | } |
| 6348 | |
| 6349 | assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes); |
| 6350 | assert_impl_all!(Foo<u8>: Unaligned); |
| 6351 | |
| 6352 | #[derive (IntoBytes, FromBytes, Unaligned)] |
| 6353 | #[repr (C, packed)] |
| 6354 | #[allow (dead_code)] // We never construct this type |
| 6355 | struct Bar<T, U> { |
| 6356 | _t: T, |
| 6357 | _u: U, |
| 6358 | } |
| 6359 | |
| 6360 | assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned); |
| 6361 | } |
| 6362 | |
| 6363 | #[cfg (feature = "alloc" )] |
| 6364 | mod alloc { |
| 6365 | use super::*; |
| 6366 | |
| 6367 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 6368 | #[test ] |
| 6369 | fn test_extend_vec_zeroed() { |
| 6370 | // Test extending when there is an existing allocation. |
| 6371 | let mut v = vec![100u16, 200, 300]; |
| 6372 | FromZeros::extend_vec_zeroed(&mut v, 3).unwrap(); |
| 6373 | assert_eq!(v.len(), 6); |
| 6374 | assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]); |
| 6375 | drop(v); |
| 6376 | |
| 6377 | // Test extending when there is no existing allocation. |
| 6378 | let mut v: Vec<u64> = Vec::new(); |
| 6379 | FromZeros::extend_vec_zeroed(&mut v, 3).unwrap(); |
| 6380 | assert_eq!(v.len(), 3); |
| 6381 | assert_eq!(&*v, &[0, 0, 0]); |
| 6382 | drop(v); |
| 6383 | } |
| 6384 | |
| 6385 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 6386 | #[test ] |
| 6387 | fn test_extend_vec_zeroed_zst() { |
| 6388 | // Test extending when there is an existing (fake) allocation. |
| 6389 | let mut v = vec![(), (), ()]; |
| 6390 | <()>::extend_vec_zeroed(&mut v, 3).unwrap(); |
| 6391 | assert_eq!(v.len(), 6); |
| 6392 | assert_eq!(&*v, &[(), (), (), (), (), ()]); |
| 6393 | drop(v); |
| 6394 | |
| 6395 | // Test extending when there is no existing (fake) allocation. |
| 6396 | let mut v: Vec<()> = Vec::new(); |
| 6397 | <()>::extend_vec_zeroed(&mut v, 3).unwrap(); |
| 6398 | assert_eq!(&*v, &[(), (), ()]); |
| 6399 | drop(v); |
| 6400 | } |
| 6401 | |
| 6402 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 6403 | #[test ] |
| 6404 | fn test_insert_vec_zeroed() { |
| 6405 | // Insert at start (no existing allocation). |
| 6406 | let mut v: Vec<u64> = Vec::new(); |
| 6407 | u64::insert_vec_zeroed(&mut v, 0, 2).unwrap(); |
| 6408 | assert_eq!(v.len(), 2); |
| 6409 | assert_eq!(&*v, &[0, 0]); |
| 6410 | drop(v); |
| 6411 | |
| 6412 | // Insert at start. |
| 6413 | let mut v = vec![100u64, 200, 300]; |
| 6414 | u64::insert_vec_zeroed(&mut v, 0, 2).unwrap(); |
| 6415 | assert_eq!(v.len(), 5); |
| 6416 | assert_eq!(&*v, &[0, 0, 100, 200, 300]); |
| 6417 | drop(v); |
| 6418 | |
| 6419 | // Insert at middle. |
| 6420 | let mut v = vec![100u64, 200, 300]; |
| 6421 | u64::insert_vec_zeroed(&mut v, 1, 1).unwrap(); |
| 6422 | assert_eq!(v.len(), 4); |
| 6423 | assert_eq!(&*v, &[100, 0, 200, 300]); |
| 6424 | drop(v); |
| 6425 | |
| 6426 | // Insert at end. |
| 6427 | let mut v = vec![100u64, 200, 300]; |
| 6428 | u64::insert_vec_zeroed(&mut v, 3, 1).unwrap(); |
| 6429 | assert_eq!(v.len(), 4); |
| 6430 | assert_eq!(&*v, &[100, 200, 300, 0]); |
| 6431 | drop(v); |
| 6432 | } |
| 6433 | |
| 6434 | #[cfg (zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)] |
| 6435 | #[test ] |
| 6436 | fn test_insert_vec_zeroed_zst() { |
| 6437 | // Insert at start (no existing fake allocation). |
| 6438 | let mut v: Vec<()> = Vec::new(); |
| 6439 | <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap(); |
| 6440 | assert_eq!(v.len(), 2); |
| 6441 | assert_eq!(&*v, &[(), ()]); |
| 6442 | drop(v); |
| 6443 | |
| 6444 | // Insert at start. |
| 6445 | let mut v = vec![(), (), ()]; |
| 6446 | <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap(); |
| 6447 | assert_eq!(v.len(), 5); |
| 6448 | assert_eq!(&*v, &[(), (), (), (), ()]); |
| 6449 | drop(v); |
| 6450 | |
| 6451 | // Insert at middle. |
| 6452 | let mut v = vec![(), (), ()]; |
| 6453 | <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap(); |
| 6454 | assert_eq!(v.len(), 4); |
| 6455 | assert_eq!(&*v, &[(), (), (), ()]); |
| 6456 | drop(v); |
| 6457 | |
| 6458 | // Insert at end. |
| 6459 | let mut v = vec![(), (), ()]; |
| 6460 | <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap(); |
| 6461 | assert_eq!(v.len(), 4); |
| 6462 | assert_eq!(&*v, &[(), (), (), ()]); |
| 6463 | drop(v); |
| 6464 | } |
| 6465 | |
| 6466 | #[test ] |
| 6467 | fn test_new_box_zeroed() { |
| 6468 | assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0))); |
| 6469 | } |
| 6470 | |
| 6471 | #[test ] |
| 6472 | fn test_new_box_zeroed_array() { |
| 6473 | drop(<[u32; 0x1000]>::new_box_zeroed()); |
| 6474 | } |
| 6475 | |
| 6476 | #[test ] |
| 6477 | fn test_new_box_zeroed_zst() { |
| 6478 | // This test exists in order to exercise unsafe code, especially |
| 6479 | // when running under Miri. |
| 6480 | #[allow (clippy::unit_cmp)] |
| 6481 | { |
| 6482 | assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(()))); |
| 6483 | } |
| 6484 | } |
| 6485 | |
| 6486 | #[test ] |
| 6487 | fn test_new_box_zeroed_with_elems() { |
| 6488 | let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap(); |
| 6489 | assert_eq!(s.len(), 3); |
| 6490 | assert_eq!(&*s, &[0, 0, 0]); |
| 6491 | s[1] = 3; |
| 6492 | assert_eq!(&*s, &[0, 3, 0]); |
| 6493 | } |
| 6494 | |
| 6495 | #[test ] |
| 6496 | fn test_new_box_zeroed_with_elems_empty() { |
| 6497 | let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap(); |
| 6498 | assert_eq!(s.len(), 0); |
| 6499 | } |
| 6500 | |
| 6501 | #[test ] |
| 6502 | fn test_new_box_zeroed_with_elems_zst() { |
| 6503 | let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap(); |
| 6504 | assert_eq!(s.len(), 3); |
| 6505 | assert!(s.get(10).is_none()); |
| 6506 | // This test exists in order to exercise unsafe code, especially |
| 6507 | // when running under Miri. |
| 6508 | #[allow (clippy::unit_cmp)] |
| 6509 | { |
| 6510 | assert_eq!(s[1], ()); |
| 6511 | } |
| 6512 | s[2] = (); |
| 6513 | } |
| 6514 | |
| 6515 | #[test ] |
| 6516 | fn test_new_box_zeroed_with_elems_zst_empty() { |
| 6517 | let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap(); |
| 6518 | assert_eq!(s.len(), 0); |
| 6519 | } |
| 6520 | |
| 6521 | #[test ] |
| 6522 | fn new_box_zeroed_with_elems_errors() { |
| 6523 | assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError)); |
| 6524 | |
| 6525 | let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap(); |
| 6526 | assert_eq!( |
| 6527 | <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1), |
| 6528 | Err(AllocError) |
| 6529 | ); |
| 6530 | } |
| 6531 | } |
| 6532 | } |
| 6533 | |
| 6534 | #[cfg (kani)] |
| 6535 | mod proofs { |
| 6536 | use super::*; |
| 6537 | |
| 6538 | impl kani::Arbitrary for DstLayout { |
| 6539 | fn any() -> Self { |
| 6540 | let align: NonZeroUsize = kani::any(); |
| 6541 | let size_info: SizeInfo = kani::any(); |
| 6542 | |
| 6543 | kani::assume(align.is_power_of_two()); |
| 6544 | kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN); |
| 6545 | |
| 6546 | // For testing purposes, we care most about instantiations of |
| 6547 | // `DstLayout` that can correspond to actual Rust types. We use |
| 6548 | // `Layout` to verify that our `DstLayout` satisfies the validity |
| 6549 | // conditions of Rust layouts. |
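| | // For example, `Layout::from_size_align(5, 4)` is `Ok`, but |
| | // `Layout::from_size_align(usize::MAX, 8)` is `Err` because the size, |
| | // rounded up to the alignment, would exceed `isize::MAX`. |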
| 6550 | kani::assume( |
| 6551 | match size_info { |
| 6552 | SizeInfo::Sized { size } => Layout::from_size_align(size, align.get()), |
| 6553 | SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size: _ }) => { |
| 6554 | // `SliceDst` cannot encode an exact size, but we know |
| 6555 | // it is at least `offset` bytes. |
| 6556 | Layout::from_size_align(offset, align.get()) |
| 6557 | } |
| 6558 | } |
| 6559 | .is_ok(), |
| 6560 | ); |
| 6561 | |
| 6562 | Self { align, size_info } |
| 6563 | } |
| 6564 | } |
| 6565 | |
| 6566 | impl kani::Arbitrary for SizeInfo { |
| 6567 | fn any() -> Self { |
| 6568 | let is_sized: bool = kani::any(); |
| 6569 | |
| 6570 | match is_sized { |
| 6571 | true => { |
| 6572 | let size: usize = kani::any(); |
| 6573 | |
| 6574 | kani::assume(size <= isize::MAX as _); |
| 6575 | |
| 6576 | SizeInfo::Sized { size } |
| 6577 | } |
| 6578 | false => SizeInfo::SliceDst(kani::any()), |
| 6579 | } |
| 6580 | } |
| 6581 | } |
| 6582 | |
| 6583 | impl kani::Arbitrary for TrailingSliceLayout { |
| 6584 | fn any() -> Self { |
| 6585 | let elem_size: usize = kani::any(); |
| 6586 | let offset: usize = kani::any(); |
| 6587 | |
| 6588 | kani::assume(elem_size < isize::MAX as _); |
| 6589 | kani::assume(offset < isize::MAX as _); |
| 6590 | |
| 6591 | TrailingSliceLayout { elem_size, offset } |
| 6592 | } |
| 6593 | } |
| 6594 | |
| 6595 | #[kani::proof] |
| 6596 | fn prove_dst_layout_extend() { |
| 6597 | use crate::util::{max, min, padding_needed_for}; |
| 6598 | |
| 6599 | let base: DstLayout = kani::any(); |
| 6600 | let field: DstLayout = kani::any(); |
| 6601 | let packed: Option<NonZeroUsize> = kani::any(); |
| 6602 | |
| 6603 | if let Some(max_align) = packed { |
| 6604 | kani::assume(max_align.is_power_of_two()); |
| 6605 | kani::assume(base.align <= max_align); |
| 6606 | } |
| 6607 | |
| 6608 | // The base can only be extended if it's sized. |
| 6609 | kani::assume(matches!(base.size_info, SizeInfo::Sized { .. })); |
| 6610 | let base_size = if let SizeInfo::Sized { size } = base.size_info { |
| 6611 | size |
| 6612 | } else { |
| 6613 | unreachable!(); |
| 6614 | }; |
| 6615 | |
| 6616 | // Under the above conditions, `DstLayout::extend` will not panic. |
| 6617 | let composite = base.extend(field, packed); |
| 6618 | |
| 6619 | // The field's alignment is clamped by `max_align` (i.e., the |
| 6620 | // `packed` attribute, if any) [1]. |
| 6621 | // |
| 6622 | // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: |
| 6623 | // |
| 6624 | // The alignments of each field, for the purpose of positioning |
| 6625 | // fields, is the smaller of the specified alignment and the |
| 6626 | // alignment of the field's type. |
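| | // As a concrete illustration (a hypothetical struct, not part of the |
| | // proof): a `u64` field (alignment 8) in a `#[repr(C, packed(2))]` |
| | // struct is positioned as if its alignment were `min(8, 2) = 2`. |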
| 6627 | let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN)); |
| 6628 | |
| 6629 | // The struct's alignment is the maximum of its previous alignment and |
| 6630 | // `field_align`. |
| 6631 | assert_eq!(composite.align, max(base.align, field_align)); |
| 6632 | |
| 6633 | // Compute the minimum amount of inter-field padding needed to |
| 6634 | // satisfy the field's alignment, and the resulting offset of the |
| 6635 | // trailing field [1]. |
| 6636 | // |
| 6637 | // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: |
| 6638 | // |
| 6639 | // Inter-field padding is guaranteed to be the minimum required in |
| 6640 | // order to satisfy each field's (possibly altered) alignment. |
| 6641 | let padding = padding_needed_for(base_size, field_align); |
| 6642 | let offset = base_size + padding; |
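| | // Illustration (not needed for the proof): with `base_size = 5` and |
| | // `field_align = 4`, `padding` is 3 and `offset` is 8. |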
| 6643 | |
| 6644 | // For testing purposes, we'll also construct `alloc::Layout` |
| 6645 | // stand-ins for `DstLayout`, and show that `extend` behaves |
| 6646 | // comparably on both types. |
| 6647 | let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap(); |
| 6648 | |
| 6649 | match field.size_info { |
| 6650 | SizeInfo::Sized { size: field_size } => { |
| 6651 | if let SizeInfo::Sized { size: composite_size } = composite.size_info { |
| 6652 | // If the trailing field is sized, the resulting layout will |
| 6653 | // be sized. Its size will be the sum of the preceding |
| 6654 | // layout's size, the size of the new field, and the |
| 6655 | // inter-field padding between the two. |
| 6656 | assert_eq!(composite_size, offset + field_size); |
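| | // E.g., continuing the illustration above: with `offset = 8` and a |
| | // 4-byte trailing field, the composite's size is 12. |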
| 6657 | |
| 6658 | let field_analog = |
| 6659 | Layout::from_size_align(field_size, field_align.get()).unwrap(); |
| 6660 | |
| 6661 | if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) |
| 6662 | { |
| 6663 | assert_eq!(actual_offset, offset); |
| 6664 | assert_eq!(actual_composite.size(), composite_size); |
| 6665 | assert_eq!(actual_composite.align(), composite.align.get()); |
| 6666 | } else { |
| 6667 | // An error here reflects that the composite of `base` |
| 6668 | // and `field` cannot correspond to a real Rust type |
| 6669 | // fragment, because such a fragment would violate |
| 6670 | // the basic invariants of a valid Rust layout. At |
| 6671 | // the time of writing, `DstLayout` is a little more |
| 6672 | // permissive than `Layout`, so we don't assert |
| 6673 | // anything in this branch (e.g., unreachability). |
| 6674 | } |
| 6675 | } else { |
| 6676 | panic!("The composite of two sized layouts must be sized." ) |
| 6677 | } |
| 6678 | } |
| 6679 | SizeInfo::SliceDst(TrailingSliceLayout { |
| 6680 | offset: field_offset, |
| 6681 | elem_size: field_elem_size, |
| 6682 | }) => { |
| 6683 | if let SizeInfo::SliceDst(TrailingSliceLayout { |
| 6684 | offset: composite_offset, |
| 6685 | elem_size: composite_elem_size, |
| 6686 | }) = composite.size_info |
| 6687 | { |
| 6688 | // The offset of the trailing slice component is the sum |
| 6689 | // of the offset of the trailing field and the trailing |
| 6690 | // slice offset within that field. |
| 6691 | assert_eq!(composite_offset, offset + field_offset); |
| 6692 | // The elem size is unchanged. |
| 6693 | assert_eq!(composite_elem_size, field_elem_size); |
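| | // E.g., if the trailing field starts at `offset = 8` and its own |
| | // trailing slice begins `field_offset = 4` bytes into it, the |
| | // composite's trailing slice begins at offset 12. |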
| 6694 | |
| 6695 | let field_analog = |
| 6696 | Layout::from_size_align(field_offset, field_align.get()).unwrap(); |
| 6697 | |
| 6698 | if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) |
| 6699 | { |
| 6700 | assert_eq!(actual_offset, offset); |
| 6701 | assert_eq!(actual_composite.size(), composite_offset); |
| 6702 | assert_eq!(actual_composite.align(), composite.align.get()); |
| 6703 | } else { |
| 6704 | // An error here reflects that the composite of `base` |
| 6705 | // and `field` cannot correspond to a real Rust type |
| 6706 | // fragment, because such a fragment would violate |
| 6707 | // the basic invariants of a valid Rust layout. At |
| 6708 | // the time of writing, `DstLayout` is a little more |
| 6709 | // permissive than `Layout`, so we don't assert |
| 6710 | // anything in this branch (e.g., unreachability). |
| 6711 | } |
| 6712 | } else { |
| 6713 | panic!("The extension of a layout with a DST must result in a DST." ) |
| 6714 | } |
| 6715 | } |
| 6716 | } |
| 6717 | } |
| 6718 | |
| 6719 | #[kani::proof] |
| 6720 | #[kani::should_panic] |
| 6721 | fn prove_dst_layout_extend_dst_panics() { |
| 6722 | let base: DstLayout = kani::any(); |
| 6723 | let field: DstLayout = kani::any(); |
| 6724 | let packed: Option<NonZeroUsize> = kani::any(); |
| 6725 | |
| 6726 | if let Some(max_align) = packed { |
| 6727 | kani::assume(max_align.is_power_of_two()); |
| 6728 | kani::assume(base.align <= max_align); |
| 6729 | } |
| 6730 | |
| 6731 | kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..))); |
| 6732 | |
| 6733 | let _ = base.extend(field, packed); |
| 6734 | } |
| 6735 | |
| 6736 | #[kani::proof] |
| 6737 | fn prove_dst_layout_pad_to_align() { |
| 6738 | use crate::util::padding_needed_for; |
| 6739 | |
| 6740 | let layout: DstLayout = kani::any(); |
| 6741 | |
| 6742 | let padded: DstLayout = layout.pad_to_align(); |
| 6743 | |
| 6744 | // Calling `pad_to_align` does not alter the `DstLayout`'s alignment. |
| 6745 | assert_eq!(padded.align, layout.align); |
| 6746 | |
| 6747 | if let SizeInfo::Sized { size: unpadded_size } = layout.size_info { |
| 6748 | if let SizeInfo::Sized { size: padded_size } = padded.size_info { |
| 6749 | // If the layout is sized, it will remain sized after padding is |
| 6750 | // added. Its size will be the sum of its unpadded size and the |
| 6751 | // trailing padding needed to satisfy its alignment |
| 6752 | // requirements. |
| 6753 | let padding = padding_needed_for(unpadded_size, layout.align); |
| 6754 | assert_eq!(padded_size, unpadded_size + padding); |
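| | // E.g., a sized layout with `unpadded_size = 5` and alignment 4 gains |
| | // 3 bytes of trailing padding, for a `padded_size` of 8. |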
| 6755 | |
| 6756 | // Prove that calling `DstLayout::pad_to_align` behaves |
| 6757 | // identically to `Layout::pad_to_align`. |
| 6758 | let layout_analog = |
| 6759 | Layout::from_size_align(unpadded_size, layout.align.get()).unwrap(); |
| 6760 | let padded_analog = layout_analog.pad_to_align(); |
| 6761 | assert_eq!(padded_analog.align(), layout.align.get()); |
| 6762 | assert_eq!(padded_analog.size(), padded_size); |
| 6763 | } else { |
| 6764 | panic!("The padding of a sized layout must result in a sized layout." ) |
| 6765 | } |
| 6766 | } else { |
| 6767 | // If the layout is a DST, padding cannot be statically added. |
| 6768 | assert_eq!(padded.size_info, layout.size_info); |
| 6769 | } |
| 6770 | } |
| 6771 | } |
| 6772 | |