| 1 | // Copyright 2024 The Fuchsia Authors |
| 2 | // |
| 3 | // Licensed under the 2-Clause BSD License <LICENSE-BSD or |
| 4 | // https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0 |
| 5 | // <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT |
| 6 | // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option. |
| 7 | // This file may not be copied, modified, or distributed except according to |
| 8 | // those terms. |
| 9 | |
| 10 | use super::*; |
| 11 | |
| 12 | mod def { |
| 13 | use core::marker::PhantomData; |
| 14 | |
| 15 | use crate::{ |
| 16 | ByteSlice, ByteSliceMut, CloneableByteSlice, CopyableByteSlice, IntoByteSlice, |
| 17 | IntoByteSliceMut, |
| 18 | }; |
| 19 | |
| 20 | /// A typed reference derived from a byte slice. |
| 21 | /// |
| 22 | /// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`. |
| 23 | /// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same |
| 24 | /// mutability as the byte slice it was constructed from (`B`). |
| 25 | /// |
| 26 | /// # Examples |
| 27 | /// |
| 28 | /// `Ref` can be used to treat a sequence of bytes as a structured type, and |
| 29 | /// to read and write the fields of that type as if the byte slice reference |
| 30 | /// were simply a reference to that type. |
| 31 | /// |
| 32 | /// ```rust |
| 33 | /// use zerocopy::*; |
| 34 | /// # use zerocopy_derive::*; |
| 35 | /// |
| 36 | /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] |
| 37 | /// #[repr(C)] |
| 38 | /// struct UdpHeader { |
| 39 | /// src_port: [u8; 2], |
| 40 | /// dst_port: [u8; 2], |
| 41 | /// length: [u8; 2], |
| 42 | /// checksum: [u8; 2], |
| 43 | /// } |
| 44 | /// |
| 45 | /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable, Unaligned)] |
| 46 | /// #[repr(C, packed)] |
| 47 | /// struct UdpPacket { |
| 48 | /// header: UdpHeader, |
| 49 | /// body: [u8], |
| 50 | /// } |
| 51 | /// |
| 52 | /// impl UdpPacket { |
| 53 | /// pub fn parse<B: ByteSlice>(bytes: B) -> Option<Ref<B, UdpPacket>> { |
| 54 | /// Ref::from_bytes(bytes).ok() |
| 55 | /// } |
| 56 | /// } |
| 57 | /// ``` |
| 58 | pub struct Ref<B, T: ?Sized>( |
| 59 | // INVARIANTS: The referent (via `.deref`, `.deref_mut`, `.into`) byte |
| 60 | // slice is aligned to `T`'s alignment and its size corresponds to a |
| 61 | // valid size for `T`. |
| 62 | B, |
| 63 | PhantomData<T>, |
| 64 | ); |
| 65 | |
| 66 | impl<B, T: ?Sized> Ref<B, T> { |
| 67 | /// Constructs a new `Ref`. |
| 68 | /// |
| 69 | /// # Safety |
| 70 | /// |
| 71 | /// `bytes` dereferences (via [`deref`], [`deref_mut`], and [`into`]) to |
| 72 | /// a byte slice which is aligned to `T`'s alignment and whose size is a |
| 73 | /// valid size for `T`. |
| 74 | /// |
| 75 | /// [`deref`]: core::ops::Deref::deref |
| 76 | /// [`deref_mut`]: core::ops::DerefMut::deref_mut |
| 77 | /// [`into`]: core::convert::Into::into |
| 78 | pub(crate) unsafe fn new_unchecked(bytes: B) -> Ref<B, T> { |
| 79 | // INVARIANTS: The caller has promised that `bytes`'s referent is |
| 80 | // validly-aligned and has a valid size. |
| 81 | Ref(bytes, PhantomData) |
| 82 | } |
| 83 | } |
| 84 | |
| 85 | impl<B: ByteSlice, T: ?Sized> Ref<B, T> { |
| 86 | /// Access the byte slice as a [`ByteSlice`]. |
| 87 | /// |
| 88 | /// # Safety |
| 89 | /// |
| 90 | /// The caller promises not to call methods on the returned |
| 91 | /// [`ByteSlice`] other than `ByteSlice` methods (for example, via |
| 92 | /// `Any::downcast_ref`). |
| 93 | /// |
| 94 | /// `as_byte_slice` promises to return a `ByteSlice` whose referent is |
| 95 | /// validly-aligned for `T` and has a valid size for `T`. |
| 96 | pub(crate) unsafe fn as_byte_slice(&self) -> &impl ByteSlice { |
| 97 | // INVARIANTS: The caller promises not to call methods other than |
| 98 | // those on `ByteSlice`. Since `B: ByteSlice`, dereference stability |
| 99 | // guarantees that calling `ByteSlice` methods will not change the |
| 100 | // address or length of `self.0`'s referent. |
| 101 | // |
| 102 | // SAFETY: By invariant on `self.0`, the alignment and size |
| 103 | // post-conditions are upheld. |
| 104 | &self.0 |
| 105 | } |
| 106 | } |
| 107 | |
| 108 | impl<B: ByteSliceMut, T: ?Sized> Ref<B, T> { |
| 109 | /// Access the byte slice as a [`ByteSliceMut`]. |
| 110 | /// |
| 111 | /// # Safety |
| 112 | /// |
| 113 | /// The caller promises not to call methods on the returned |
| 114 | /// [`ByteSliceMut`] other than `ByteSliceMut` methods (for example, via |
| 115 | /// `Any::downcast_mut`). |
| 116 | /// |
/// `as_byte_slice_mut` promises to return a `ByteSliceMut` whose referent is
| 118 | /// validly-aligned for `T` and has a valid size for `T`. |
| 119 | pub(crate) unsafe fn as_byte_slice_mut(&mut self) -> &mut impl ByteSliceMut { |
| 120 | // INVARIANTS: The caller promises not to call methods other than |
| 121 | // those on `ByteSliceMut`. Since `B: ByteSlice`, dereference |
| 122 | // stability guarantees that calling `ByteSlice` methods will not |
| 123 | // change the address or length of `self.0`'s referent. |
| 124 | // |
| 125 | // SAFETY: By invariant on `self.0`, the alignment and size |
| 126 | // post-conditions are upheld. |
| 127 | &mut self.0 |
| 128 | } |
| 129 | } |
| 130 | |
| 131 | impl<'a, B: IntoByteSlice<'a>, T: ?Sized> Ref<B, T> { |
| 132 | /// Access the byte slice as an [`IntoByteSlice`]. |
| 133 | /// |
| 134 | /// # Safety |
| 135 | /// |
| 136 | /// The caller promises not to call methods on the returned |
| 137 | /// [`IntoByteSlice`] other than `IntoByteSlice` methods (for example, |
| 138 | /// via `Any::downcast_ref`). |
| 139 | /// |
/// `into_byte_slice` promises to return a `ByteSlice` whose referent is
| 141 | /// validly-aligned for `T` and has a valid size for `T`. |
| 142 | pub(crate) unsafe fn into_byte_slice(self) -> impl IntoByteSlice<'a> { |
| 143 | // INVARIANTS: The caller promises not to call methods other than |
| 144 | // those on `IntoByteSlice`. Since `B: ByteSlice`, dereference |
| 145 | // stability guarantees that calling `ByteSlice` methods will not |
| 146 | // change the address or length of `self.0`'s referent. |
| 147 | // |
| 148 | // SAFETY: By invariant on `self.0`, the alignment and size |
| 149 | // post-conditions are upheld. |
| 150 | self.0 |
| 151 | } |
| 152 | } |
| 153 | |
| 154 | impl<'a, B: IntoByteSliceMut<'a>, T: ?Sized> Ref<B, T> { |
| 155 | /// Access the byte slice as an [`IntoByteSliceMut`]. |
| 156 | /// |
| 157 | /// # Safety |
| 158 | /// |
| 159 | /// The caller promises not to call methods on the returned |
| 160 | /// [`IntoByteSliceMut`] other than `IntoByteSliceMut` methods (for |
| 161 | /// example, via `Any::downcast_mut`). |
| 162 | /// |
/// `into_byte_slice_mut` promises to return a `ByteSliceMut` whose referent is
| 164 | /// validly-aligned for `T` and has a valid size for `T`. |
| 165 | pub(crate) unsafe fn into_byte_slice_mut(self) -> impl IntoByteSliceMut<'a> { |
| 166 | // INVARIANTS: The caller promises not to call methods other than |
| 167 | // those on `IntoByteSliceMut`. Since `B: ByteSlice`, dereference |
| 168 | // stability guarantees that calling `ByteSlice` methods will not |
| 169 | // change the address or length of `self.0`'s referent. |
| 170 | // |
| 171 | // SAFETY: By invariant on `self.0`, the alignment and size |
| 172 | // post-conditions are upheld. |
| 173 | self.0 |
| 174 | } |
| 175 | } |
| 176 | |
| 177 | impl<B: CloneableByteSlice + Clone, T: ?Sized> Clone for Ref<B, T> { |
#[inline]
| 179 | fn clone(&self) -> Ref<B, T> { |
| 180 | // INVARIANTS: Since `B: CloneableByteSlice`, `self.0.clone()` has |
| 181 | // the same address and length as `self.0`. Since `self.0` upholds |
| 182 | // the field invariants, so does `self.0.clone()`. |
| 183 | Ref(self.0.clone(), PhantomData) |
| 184 | } |
| 185 | } |
| 186 | |
| 187 | // INVARIANTS: Since `B: CopyableByteSlice`, the copied `Ref`'s `.0` has the |
| 188 | // same address and length as the original `Ref`'s `.0`. Since the original |
| 189 | // upholds the field invariants, so does the copy. |
| 190 | impl<B: CopyableByteSlice + Copy, T: ?Sized> Copy for Ref<B, T> {} |
| 191 | } |
| 192 | |
#[allow(unreachable_pub)] // This is a false positive on our MSRV toolchain.
| 194 | pub use def::Ref; |
| 195 | |
| 196 | impl<B, T> Ref<B, T> |
| 197 | where |
| 198 | B: ByteSlice, |
| 199 | { |
#[must_use = "has no side effects"]
pub(crate) fn sized_from(bytes: B) -> Result<Ref<B, T>, CastError<B, T>> {
if bytes.len() != mem::size_of::<T>() {
return Err(SizeError::new(bytes).into());
}
if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) {
return Err(err.with_src(bytes).into());
}
| 208 | |
| 209 | // SAFETY: We just validated size and alignment. |
| 210 | Ok(unsafe { Ref::new_unchecked(bytes) }) |
| 211 | } |
| 212 | } |
| 213 | |
| 214 | impl<B, T> Ref<B, T> |
| 215 | where |
| 216 | B: SplitByteSlice, |
| 217 | { |
#[must_use = "has no side effects"]
| 219 | pub(crate) fn sized_from_prefix(bytes: B) -> Result<(Ref<B, T>, B), CastError<B, T>> { |
| 220 | if bytes.len() < mem::size_of::<T>() { |
| 221 | return Err(SizeError::new(bytes).into()); |
| 222 | } |
| 223 | if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) { |
| 224 | return Err(err.with_src(bytes).into()); |
| 225 | } |
| 226 | let (bytes, suffix) = |
| 227 | bytes.split_at(mem::size_of::<T>()).map_err(|b| SizeError::new(b).into())?; |
| 228 | // SAFETY: We just validated alignment and that `bytes` is at least as |
| 229 | // large as `T`. `bytes.split_at(mem::size_of::<T>())?` ensures that the |
| 230 | // new `bytes` is exactly the size of `T`. By safety postcondition on |
| 231 | // `SplitByteSlice::split_at` we can rely on `split_at` to produce the |
| 232 | // correct `bytes` and `suffix`. |
| 233 | let r = unsafe { Ref::new_unchecked(bytes) }; |
| 234 | Ok((r, suffix)) |
| 235 | } |
| 236 | |
#[must_use = "has no side effects"]
| 238 | pub(crate) fn sized_from_suffix(bytes: B) -> Result<(B, Ref<B, T>), CastError<B, T>> { |
| 239 | let bytes_len = bytes.len(); |
| 240 | let split_at = if let Some(split_at) = bytes_len.checked_sub(mem::size_of::<T>()) { |
| 241 | split_at |
| 242 | } else { |
| 243 | return Err(SizeError::new(bytes).into()); |
| 244 | }; |
| 245 | let (prefix, bytes) = bytes.split_at(split_at).map_err(|b| SizeError::new(b).into())?; |
| 246 | if let Err(err) = util::validate_aligned_to::<_, T>(bytes.deref()) { |
| 247 | return Err(err.with_src(bytes).into()); |
| 248 | } |
| 249 | // SAFETY: Since `split_at` is defined as `bytes_len - size_of::<T>()`, |
| 250 | // the `bytes` which results from `let (prefix, bytes) = |
| 251 | // bytes.split_at(split_at)?` has length `size_of::<T>()`. After |
| 252 | // constructing `bytes`, we validate that it has the proper alignment. |
| 253 | // By safety postcondition on `SplitByteSlice::split_at` we can rely on |
| 254 | // `split_at` to produce the correct `prefix` and `bytes`. |
| 255 | let r = unsafe { Ref::new_unchecked(bytes) }; |
| 256 | Ok((prefix, r)) |
| 257 | } |
| 258 | } |
| 259 | |
| 260 | impl<B, T> Ref<B, T> |
| 261 | where |
| 262 | B: ByteSlice, |
| 263 | T: KnownLayout + Immutable + ?Sized, |
| 264 | { |
| 265 | /// Constructs a `Ref` from a byte slice. |
| 266 | /// |
| 267 | /// If the length of `source` is not a [valid size of `T`][valid-size], or |
| 268 | /// if `source` is not appropriately aligned for `T`, this returns `Err`. If |
| 269 | /// [`T: Unaligned`][t-unaligned], you can [infallibly discard the alignment |
| 270 | /// error][size-error-from]. |
| 271 | /// |
| 272 | /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 273 | /// |
| 274 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 275 | /// [t-unaligned]: Unaligned |
| 276 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 277 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 278 | /// |
| 279 | /// # Compile-Time Assertions |
| 280 | /// |
| 281 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 282 | /// component is zero-sized. Attempting to use this method on such types |
| 283 | /// results in a compile-time assertion error; e.g.: |
| 284 | /// |
| 285 | /// ```compile_fail,E0080 |
| 286 | /// use zerocopy::*; |
| 287 | /// # use zerocopy_derive::*; |
| 288 | /// |
| 289 | /// #[derive(Immutable, KnownLayout)] |
| 290 | /// #[repr(C)] |
| 291 | /// struct ZSTy { |
| 292 | /// leading_sized: u16, |
| 293 | /// trailing_dst: [()], |
| 294 | /// } |
| 295 | /// |
/// let _ = Ref::<_, ZSTy>::from_bytes(&b"UU"[..]); // ⚠ Compile Error!
| 297 | /// ``` |
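///
/// # Examples
///
/// As a minimal, illustrative sketch (a plain `[u8; 4]` is used as the target
/// type so that alignment cannot fail, and the byte values are arbitrary):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8; 4]>::from_bytes(&bytes[..]).unwrap();
/// assert_eq!(*r, [1, 2, 3, 4]);
///
/// // A source of the wrong length is rejected.
/// assert!(Ref::<_, [u8; 4]>::from_bytes(&bytes[..3]).is_err());
/// ```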
#[must_use = "has no side effects"]
#[inline]
| 300 | pub fn from_bytes(source: B) -> Result<Ref<B, T>, CastError<B, T>> { |
| 301 | static_assert_dst_is_not_zst!(T); |
| 302 | if let Err(e) = |
| 303 | Ptr::from_ref(source.deref()).try_cast_into_no_leftover::<T, BecauseImmutable>(None) |
| 304 | { |
| 305 | return Err(e.with_src(()).with_src(source)); |
| 306 | } |
| 307 | // SAFETY: `try_cast_into_no_leftover` validates size and alignment. |
| 308 | Ok(unsafe { Ref::new_unchecked(source) }) |
| 309 | } |
| 310 | } |
| 311 | |
| 312 | impl<B, T> Ref<B, T> |
| 313 | where |
| 314 | B: SplitByteSlice, |
| 315 | T: KnownLayout + Immutable + ?Sized, |
| 316 | { |
| 317 | /// Constructs a `Ref` from the prefix of a byte slice. |
| 318 | /// |
| 319 | /// This method computes the [largest possible size of `T`][valid-size] that |
| 320 | /// can fit in the leading bytes of `source`, then attempts to return both a |
| 321 | /// `Ref` to those bytes, and a reference to the remaining bytes. If there |
| 322 | /// are insufficient bytes, or if `source` is not appropriately aligned, |
| 323 | /// this returns `Err`. If [`T: Unaligned`][t-unaligned], you can |
| 324 | /// [infallibly discard the alignment error][size-error-from]. |
| 325 | /// |
| 326 | /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 327 | /// |
| 328 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 329 | /// [t-unaligned]: Unaligned |
| 330 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 331 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 332 | /// |
| 333 | /// # Compile-Time Assertions |
| 334 | /// |
| 335 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 336 | /// component is zero-sized. Attempting to use this method on such types |
| 337 | /// results in a compile-time assertion error; e.g.: |
| 338 | /// |
| 339 | /// ```compile_fail,E0080 |
| 340 | /// use zerocopy::*; |
| 341 | /// # use zerocopy_derive::*; |
| 342 | /// |
| 343 | /// #[derive(Immutable, KnownLayout)] |
| 344 | /// #[repr(C)] |
| 345 | /// struct ZSTy { |
| 346 | /// leading_sized: u16, |
| 347 | /// trailing_dst: [()], |
| 348 | /// } |
| 349 | /// |
/// let _ = Ref::<_, ZSTy>::from_prefix(&b"UU"[..]); // ⚠ Compile Error!
| 351 | /// ``` |
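///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type and byte values are purely
/// illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4, 5, 6];
/// let (r, rest) = Ref::<_, [u8; 4]>::from_prefix(&bytes[..]).unwrap();
/// assert_eq!(*r, [1, 2, 3, 4]);
/// assert_eq!(rest, &[5u8, 6][..]);
/// ```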
#[must_use = "has no side effects"]
#[inline]
| 354 | pub fn from_prefix(source: B) -> Result<(Ref<B, T>, B), CastError<B, T>> { |
| 355 | static_assert_dst_is_not_zst!(T); |
| 356 | let remainder = match Ptr::from_ref(source.deref()) |
| 357 | .try_cast_into::<T, BecauseImmutable>(CastType::Prefix, None) |
| 358 | { |
| 359 | Ok((_, remainder)) => remainder, |
| 360 | Err(e) => { |
| 361 | return Err(e.with_src(()).with_src(source)); |
| 362 | } |
| 363 | }; |
| 364 | |
| 365 | // SAFETY: `remainder` is constructed as a subset of `source`, and so it |
| 366 | // cannot have a larger size than `source`. Both of their `len` methods |
| 367 | // measure bytes (`source` deref's to `[u8]`, and `remainder` is a |
| 368 | // `Ptr<[u8]>`), so `source.len() >= remainder.len()`. Thus, this cannot |
| 369 | // underflow. |
#[allow(unstable_name_collisions, clippy::incompatible_msrv)]
| 371 | let split_at = unsafe { source.len().unchecked_sub(remainder.len()) }; |
| 372 | let (bytes, suffix) = source.split_at(split_at).map_err(|b| SizeError::new(b).into())?; |
| 373 | // SAFETY: `try_cast_into` validates size and alignment, and returns a |
| 374 | // `split_at` that indicates how many bytes of `source` correspond to a |
| 375 | // valid `T`. By safety postcondition on `SplitByteSlice::split_at` we |
| 376 | // can rely on `split_at` to produce the correct `source` and `suffix`. |
| 377 | let r = unsafe { Ref::new_unchecked(bytes) }; |
| 378 | Ok((r, suffix)) |
| 379 | } |
| 380 | |
| 381 | /// Constructs a `Ref` from the suffix of a byte slice. |
| 382 | /// |
| 383 | /// This method computes the [largest possible size of `T`][valid-size] that |
| 384 | /// can fit in the trailing bytes of `source`, then attempts to return both |
| 385 | /// a `Ref` to those bytes, and a reference to the preceding bytes. If there |
| 386 | /// are insufficient bytes, or if that suffix of `source` is not |
| 387 | /// appropriately aligned, this returns `Err`. If [`T: |
| 388 | /// Unaligned`][t-unaligned], you can [infallibly discard the alignment |
| 389 | /// error][size-error-from]. |
| 390 | /// |
| 391 | /// `T` may be a sized type, a slice, or a [slice DST][slice-dst]. |
| 392 | /// |
| 393 | /// [valid-size]: crate::KnownLayout#what-is-a-valid-size |
| 394 | /// [t-unaligned]: Unaligned |
| 395 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 396 | /// [slice-dst]: KnownLayout#dynamically-sized-types |
| 397 | /// |
| 398 | /// # Compile-Time Assertions |
| 399 | /// |
| 400 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 401 | /// component is zero-sized. Attempting to use this method on such types |
| 402 | /// results in a compile-time assertion error; e.g.: |
| 403 | /// |
| 404 | /// ```compile_fail,E0080 |
| 405 | /// use zerocopy::*; |
| 406 | /// # use zerocopy_derive::*; |
| 407 | /// |
| 408 | /// #[derive(Immutable, KnownLayout)] |
| 409 | /// #[repr(C)] |
| 410 | /// struct ZSTy { |
| 411 | /// leading_sized: u16, |
| 412 | /// trailing_dst: [()], |
| 413 | /// } |
| 414 | /// |
/// let _ = Ref::<_, ZSTy>::from_suffix(&b"UU"[..]); // ⚠ Compile Error!
| 416 | /// ``` |
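///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type and byte values are purely
/// illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4, 5, 6];
/// let (rest, r) = Ref::<_, [u8; 4]>::from_suffix(&bytes[..]).unwrap();
/// assert_eq!(rest, &[1u8, 2][..]);
/// assert_eq!(*r, [3, 4, 5, 6]);
/// ```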
#[must_use = "has no side effects"]
#[inline]
| 419 | pub fn from_suffix(source: B) -> Result<(B, Ref<B, T>), CastError<B, T>> { |
| 420 | static_assert_dst_is_not_zst!(T); |
| 421 | let remainder = match Ptr::from_ref(source.deref()) |
| 422 | .try_cast_into::<T, BecauseImmutable>(CastType::Suffix, None) |
| 423 | { |
| 424 | Ok((_, remainder)) => remainder, |
| 425 | Err(e) => { |
| 426 | let e = e.with_src(()); |
| 427 | return Err(e.with_src(source)); |
| 428 | } |
| 429 | }; |
| 430 | |
| 431 | let split_at = remainder.len(); |
| 432 | let (prefix, bytes) = source.split_at(split_at).map_err(|b| SizeError::new(b).into())?; |
| 433 | // SAFETY: `try_cast_into` validates size and alignment, and returns a |
| 434 | // `split_at` that indicates how many bytes of `source` correspond to a |
| 435 | // valid `T`. By safety postcondition on `SplitByteSlice::split_at` we |
| 436 | // can rely on `split_at` to produce the correct `prefix` and `bytes`. |
| 437 | let r = unsafe { Ref::new_unchecked(bytes) }; |
| 438 | Ok((prefix, r)) |
| 439 | } |
| 440 | } |
| 441 | |
| 442 | impl<B, T> Ref<B, T> |
| 443 | where |
| 444 | B: ByteSlice, |
| 445 | T: KnownLayout<PointerMetadata = usize> + Immutable + ?Sized, |
| 446 | { |
| 447 | /// Constructs a `Ref` from the given bytes with DST length equal to `count` |
| 448 | /// without copying. |
| 449 | /// |
| 450 | /// This method attempts to return a `Ref` to the prefix of `source` |
| 451 | /// interpreted as a `T` with `count` trailing elements, and a reference to |
| 452 | /// the remaining bytes. If the length of `source` is not equal to the size |
| 453 | /// of `Self` with `count` elements, or if `source` is not appropriately |
| 454 | /// aligned, this returns `Err`. If [`T: Unaligned`][t-unaligned], you can |
| 455 | /// [infallibly discard the alignment error][size-error-from]. |
| 456 | /// |
| 457 | /// [t-unaligned]: Unaligned |
| 458 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 459 | /// |
| 460 | /// # Compile-Time Assertions |
| 461 | /// |
| 462 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 463 | /// component is zero-sized. Attempting to use this method on such types |
| 464 | /// results in a compile-time assertion error; e.g.: |
| 465 | /// |
| 466 | /// ```compile_fail,E0080 |
| 467 | /// use zerocopy::*; |
| 468 | /// # use zerocopy_derive::*; |
| 469 | /// |
| 470 | /// #[derive(Immutable, KnownLayout)] |
| 471 | /// #[repr(C)] |
| 472 | /// struct ZSTy { |
| 473 | /// leading_sized: u16, |
| 474 | /// trailing_dst: [()], |
| 475 | /// } |
| 476 | /// |
/// let _ = Ref::<_, ZSTy>::from_bytes_with_elems(&b"UU"[..], 42); // ⚠ Compile Error!
| 478 | /// ``` |
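///
/// # Examples
///
/// A minimal sketch using `[u8]` as the slice DST (the element type and byte
/// values are purely illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8]>::from_bytes_with_elems(&bytes[..], 4).unwrap();
/// assert_eq!(&*r, &[1u8, 2, 3, 4][..]);
///
/// // A mismatched element count is rejected.
/// assert!(Ref::<_, [u8]>::from_bytes_with_elems(&bytes[..], 3).is_err());
/// ```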
#[inline]
| 480 | pub fn from_bytes_with_elems(source: B, count: usize) -> Result<Ref<B, T>, CastError<B, T>> { |
| 481 | static_assert_dst_is_not_zst!(T); |
| 482 | let expected_len = match count.size_for_metadata(T::LAYOUT) { |
| 483 | Some(len) => len, |
| 484 | None => return Err(SizeError::new(source).into()), |
| 485 | }; |
| 486 | if source.len() != expected_len { |
| 487 | return Err(SizeError::new(source).into()); |
| 488 | } |
| 489 | Self::from_bytes(source) |
| 490 | } |
| 491 | } |
| 492 | |
| 493 | impl<B, T> Ref<B, T> |
| 494 | where |
| 495 | B: SplitByteSlice, |
| 496 | T: KnownLayout<PointerMetadata = usize> + Immutable + ?Sized, |
| 497 | { |
| 498 | /// Constructs a `Ref` from the prefix of the given bytes with DST |
| 499 | /// length equal to `count` without copying. |
| 500 | /// |
| 501 | /// This method attempts to return a `Ref` to the prefix of `source` |
| 502 | /// interpreted as a `T` with `count` trailing elements, and a reference to |
| 503 | /// the remaining bytes. If there are insufficient bytes, or if `source` is |
| 504 | /// not appropriately aligned, this returns `Err`. If [`T: |
| 505 | /// Unaligned`][t-unaligned], you can [infallibly discard the alignment |
| 506 | /// error][size-error-from]. |
| 507 | /// |
| 508 | /// [t-unaligned]: Unaligned |
| 509 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 510 | /// |
| 511 | /// # Compile-Time Assertions |
| 512 | /// |
| 513 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 514 | /// component is zero-sized. Attempting to use this method on such types |
| 515 | /// results in a compile-time assertion error; e.g.: |
| 516 | /// |
| 517 | /// ```compile_fail,E0080 |
| 518 | /// use zerocopy::*; |
| 519 | /// # use zerocopy_derive::*; |
| 520 | /// |
| 521 | /// #[derive(Immutable, KnownLayout)] |
| 522 | /// #[repr(C)] |
| 523 | /// struct ZSTy { |
| 524 | /// leading_sized: u16, |
| 525 | /// trailing_dst: [()], |
| 526 | /// } |
| 527 | /// |
/// let _ = Ref::<_, ZSTy>::from_prefix_with_elems(&b"UU"[..], 42); // ⚠ Compile Error!
| 529 | /// ``` |
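///
/// # Examples
///
/// A minimal sketch using `[u8]` as the slice DST (purely illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4, 5];
/// let (r, rest) = Ref::<_, [u8]>::from_prefix_with_elems(&bytes[..], 3).unwrap();
/// assert_eq!(&*r, &[1u8, 2, 3][..]);
/// assert_eq!(rest, &[4u8, 5][..]);
/// ```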
#[inline]
| 531 | pub fn from_prefix_with_elems( |
| 532 | source: B, |
| 533 | count: usize, |
| 534 | ) -> Result<(Ref<B, T>, B), CastError<B, T>> { |
| 535 | static_assert_dst_is_not_zst!(T); |
| 536 | let expected_len = match count.size_for_metadata(T::LAYOUT) { |
| 537 | Some(len) => len, |
| 538 | None => return Err(SizeError::new(source).into()), |
| 539 | }; |
| 540 | let (prefix, bytes) = source.split_at(expected_len).map_err(SizeError::new)?; |
| 541 | Self::from_bytes(prefix).map(move |l| (l, bytes)) |
| 542 | } |
| 543 | |
| 544 | /// Constructs a `Ref` from the suffix of the given bytes with DST length |
| 545 | /// equal to `count` without copying. |
| 546 | /// |
| 547 | /// This method attempts to return a `Ref` to the suffix of `source` |
| 548 | /// interpreted as a `T` with `count` trailing elements, and a reference to |
| 549 | /// the preceding bytes. If there are insufficient bytes, or if that suffix |
| 550 | /// of `source` is not appropriately aligned, this returns `Err`. If [`T: |
| 551 | /// Unaligned`][t-unaligned], you can [infallibly discard the alignment |
| 552 | /// error][size-error-from]. |
| 553 | /// |
| 554 | /// [t-unaligned]: Unaligned |
| 555 | /// [size-error-from]: error/struct.SizeError.html#method.from-1 |
| 556 | /// |
| 557 | /// # Compile-Time Assertions |
| 558 | /// |
| 559 | /// This method cannot yet be used on unsized types whose dynamically-sized |
| 560 | /// component is zero-sized. Attempting to use this method on such types |
| 561 | /// results in a compile-time assertion error; e.g.: |
| 562 | /// |
| 563 | /// ```compile_fail,E0080 |
| 564 | /// use zerocopy::*; |
| 565 | /// # use zerocopy_derive::*; |
| 566 | /// |
| 567 | /// #[derive(Immutable, KnownLayout)] |
| 568 | /// #[repr(C)] |
| 569 | /// struct ZSTy { |
| 570 | /// leading_sized: u16, |
| 571 | /// trailing_dst: [()], |
| 572 | /// } |
| 573 | /// |
/// let _ = Ref::<_, ZSTy>::from_suffix_with_elems(&b"UU"[..], 42); // ⚠ Compile Error!
| 575 | /// ``` |
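///
/// # Examples
///
/// A minimal sketch using `[u8]` as the slice DST (purely illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4, 5];
/// let (rest, r) = Ref::<_, [u8]>::from_suffix_with_elems(&bytes[..], 3).unwrap();
/// assert_eq!(rest, &[1u8, 2][..]);
/// assert_eq!(&*r, &[3u8, 4, 5][..]);
/// ```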
#[inline]
| 577 | pub fn from_suffix_with_elems( |
| 578 | source: B, |
| 579 | count: usize, |
| 580 | ) -> Result<(B, Ref<B, T>), CastError<B, T>> { |
| 581 | static_assert_dst_is_not_zst!(T); |
| 582 | let expected_len = match count.size_for_metadata(T::LAYOUT) { |
| 583 | Some(len) => len, |
| 584 | None => return Err(SizeError::new(source).into()), |
| 585 | }; |
| 586 | let split_at = if let Some(split_at) = source.len().checked_sub(expected_len) { |
| 587 | split_at |
| 588 | } else { |
| 589 | return Err(SizeError::new(source).into()); |
| 590 | }; |
// SAFETY: The preceding `source.len().checked_sub(expected_len)`
| 592 | // guarantees that `split_at` is in-bounds. |
| 593 | let (bytes, suffix) = unsafe { source.split_at_unchecked(split_at) }; |
| 594 | Self::from_bytes(suffix).map(move |l| (bytes, l)) |
| 595 | } |
| 596 | } |
| 597 | |
| 598 | impl<'a, B, T> Ref<B, T> |
| 599 | where |
| 600 | B: 'a + IntoByteSlice<'a>, |
| 601 | T: FromBytes + KnownLayout + Immutable + ?Sized, |
| 602 | { |
| 603 | /// Converts this `Ref` into a reference. |
| 604 | /// |
| 605 | /// `into_ref` consumes the `Ref`, and returns a reference to `T`. |
| 606 | /// |
| 607 | /// Note: this is an associated function, which means that you have to call |
| 608 | /// it as `Ref::into_ref(r)` instead of `r.into_ref()`. This is so that |
| 609 | /// there is no conflict with a method on the inner type. |
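///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative); note that the
/// returned reference borrows from the underlying byte slice, not from the
/// consumed `Ref`:
///
/// ```
/// use zerocopy::Ref;
///
/// let bytes = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8; 4]>::from_bytes(&bytes[..]).unwrap();
/// let array: &[u8; 4] = Ref::into_ref(r);
/// assert_eq!(array, &[1, 2, 3, 4]);
/// ```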
#[must_use = "has no side effects"]
#[inline(always)]
| 612 | pub fn into_ref(r: Self) -> &'a T { |
| 613 | // Presumably unreachable, since we've guarded each constructor of `Ref`. |
| 614 | static_assert_dst_is_not_zst!(T); |
| 615 | |
| 616 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 617 | // `IntoByteSlice`. |
| 618 | let b = unsafe { r.into_byte_slice() }; |
| 619 | |
| 620 | // PANICS: By post-condition on `into_byte_slice`, `b`'s size and |
| 621 | // alignment are valid for `T`. By post-condition, `b.into_byte_slice()` |
| 622 | // produces a byte slice with identical address and length to that |
| 623 | // produced by `b.deref()`. |
| 624 | let ptr = Ptr::from_ref(b.into_byte_slice()) |
| 625 | .try_cast_into_no_leftover::<T, BecauseImmutable>(None) |
| 626 | .expect("zerocopy internal error: into_ref should be infallible" ); |
| 627 | let ptr = ptr.recall_validity(); |
| 628 | ptr.as_ref() |
| 629 | } |
| 630 | } |
| 631 | |
| 632 | impl<'a, B, T> Ref<B, T> |
| 633 | where |
| 634 | B: 'a + IntoByteSliceMut<'a>, |
| 635 | T: FromBytes + IntoBytes + KnownLayout + ?Sized, |
| 636 | { |
| 637 | /// Converts this `Ref` into a mutable reference. |
| 638 | /// |
| 639 | /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`. |
| 640 | /// |
| 641 | /// Note: this is an associated function, which means that you have to call |
| 642 | /// it as `Ref::into_mut(r)` instead of `r.into_mut()`. This is so that |
| 643 | /// there is no conflict with a method on the inner type. |
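///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative); writes through
/// the returned reference are visible in the underlying byte slice:
///
/// ```
/// use zerocopy::Ref;
///
/// let mut bytes = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8; 4]>::from_bytes(&mut bytes[..]).unwrap();
/// let array: &mut [u8; 4] = Ref::into_mut(r);
/// array[0] = 42;
/// assert_eq!(bytes, [42, 2, 3, 4]);
/// ```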
#[must_use = "has no side effects"]
#[inline(always)]
| 646 | pub fn into_mut(r: Self) -> &'a mut T { |
| 647 | // Presumably unreachable, since we've guarded each constructor of `Ref`. |
| 648 | static_assert_dst_is_not_zst!(T); |
| 649 | |
| 650 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 651 | // `IntoByteSliceMut`. |
| 652 | let b = unsafe { r.into_byte_slice_mut() }; |
| 653 | |
| 654 | // PANICS: By post-condition on `into_byte_slice_mut`, `b`'s size and |
| 655 | // alignment are valid for `T`. By post-condition, |
| 656 | // `b.into_byte_slice_mut()` produces a byte slice with identical |
| 657 | // address and length to that produced by `b.deref_mut()`. |
| 658 | let ptr = Ptr::from_mut(b.into_byte_slice_mut()) |
| 659 | .try_cast_into_no_leftover::<T, BecauseExclusive>(None) |
| 660 | .expect("zerocopy internal error: into_ref should be infallible" ); |
| 661 | let ptr = ptr.recall_validity(); |
| 662 | ptr.as_mut() |
| 663 | } |
| 664 | } |
| 665 | |
| 666 | impl<B, T> Ref<B, T> |
| 667 | where |
| 668 | B: ByteSlice, |
| 669 | T: ?Sized, |
| 670 | { |
| 671 | /// Gets the underlying bytes. |
| 672 | /// |
| 673 | /// Note: this is an associated function, which means that you have to call |
| 674 | /// it as `Ref::bytes(r)` instead of `r.bytes()`. This is so that there is |
| 675 | /// no conflict with a method on the inner type. |
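///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let data = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8; 4]>::from_bytes(&data[..]).unwrap();
/// assert_eq!(Ref::bytes(&r), &[1u8, 2, 3, 4][..]);
/// ```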
#[inline]
| 677 | pub fn bytes(r: &Self) -> &[u8] { |
| 678 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 679 | // `ByteSlice`. |
| 680 | unsafe { r.as_byte_slice().deref() } |
| 681 | } |
| 682 | } |
| 683 | |
| 684 | impl<B, T> Ref<B, T> |
| 685 | where |
| 686 | B: ByteSliceMut, |
| 687 | T: ?Sized, |
| 688 | { |
| 689 | /// Gets the underlying bytes mutably. |
| 690 | /// |
| 691 | /// Note: this is an associated function, which means that you have to call |
| 692 | /// it as `Ref::bytes_mut(r)` instead of `r.bytes_mut()`. This is so that |
| 693 | /// there is no conflict with a method on the inner type. |
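///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative); writes to the
/// returned byte slice are visible through the typed view:
///
/// ```
/// use zerocopy::Ref;
///
/// let mut data = [1u8, 2, 3, 4];
/// let mut r = Ref::<_, [u8; 4]>::from_bytes(&mut data[..]).unwrap();
/// Ref::bytes_mut(&mut r)[0] = 42;
/// assert_eq!(*r, [42, 2, 3, 4]);
/// ```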
#[inline]
| 695 | pub fn bytes_mut(r: &mut Self) -> &mut [u8] { |
| 696 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 697 | // `ByteSliceMut`. |
| 698 | unsafe { r.as_byte_slice_mut().deref_mut() } |
| 699 | } |
| 700 | } |
| 701 | |
| 702 | impl<B, T> Ref<B, T> |
| 703 | where |
| 704 | B: ByteSlice, |
| 705 | T: FromBytes, |
| 706 | { |
| 707 | /// Reads a copy of `T`. |
| 708 | /// |
| 709 | /// Note: this is an associated function, which means that you have to call |
| 710 | /// it as `Ref::read(r)` instead of `r.read()`. This is so that there is no |
| 711 | /// conflict with a method on the inner type. |
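///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative):
///
/// ```
/// use zerocopy::Ref;
///
/// let data = [1u8, 2, 3, 4];
/// let r = Ref::<_, [u8; 4]>::from_bytes(&data[..]).unwrap();
/// let copy: [u8; 4] = Ref::read(&r);
/// assert_eq!(copy, [1, 2, 3, 4]);
/// ```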
#[must_use = "has no side effects"]
#[inline]
| 714 | pub fn read(r: &Self) -> T { |
| 715 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 716 | // `ByteSlice`. |
let b = unsafe { r.as_byte_slice() };

// SAFETY: By postcondition on `as_byte_slice`, we know that `b` has a
// valid size and alignment for `T`. By safety invariant on `ByteSlice`,
// we know that this is preserved via `.deref()`. Because `T:
// FromBytes`, it is sound to interpret these bytes as a `T`.
unsafe { ptr::read(b.deref().as_ptr().cast::<T>()) }
| 724 | } |
| 725 | } |
| 726 | |
| 727 | impl<B, T> Ref<B, T> |
| 728 | where |
| 729 | B: ByteSliceMut, |
| 730 | T: IntoBytes, |
| 731 | { |
| 732 | /// Writes the bytes of `t` and then forgets `t`. |
| 733 | /// |
| 734 | /// Note: this is an associated function, which means that you have to call |
| 735 | /// it as `Ref::write(r, t)` instead of `r.write(t)`. This is so that there |
| 736 | /// is no conflict with a method on the inner type. |
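///
/// # Examples
///
/// A minimal sketch (the `[u8; 4]` target type is illustrative); the written
/// bytes are visible in the underlying byte slice:
///
/// ```
/// use zerocopy::Ref;
///
/// let mut data = [0u8; 4];
/// let mut r = Ref::<_, [u8; 4]>::from_bytes(&mut data[..]).unwrap();
/// Ref::write(&mut r, [1, 2, 3, 4]);
/// assert_eq!(data, [1, 2, 3, 4]);
/// ```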
#[inline]
| 738 | pub fn write(r: &mut Self, t: T) { |
| 739 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 740 | // `ByteSliceMut`. |
let b = unsafe { r.as_byte_slice_mut() };

// SAFETY: By postcondition on `as_byte_slice_mut`, we know that `b` has
// a valid size and alignment for `T`. By safety invariant on
// `ByteSlice`, we know that this is preserved via `.deref()`. Writing
// `t` to the buffer will allow all of the bytes of `t` to be accessed
// as a `[u8]`, but because `T: IntoBytes`, we know that this is sound.
unsafe { ptr::write(b.deref_mut().as_mut_ptr().cast::<T>(), t) }
| 749 | } |
| 750 | } |
| 751 | |
| 752 | impl<B, T> Deref for Ref<B, T> |
| 753 | where |
| 754 | B: ByteSlice, |
| 755 | T: FromBytes + KnownLayout + Immutable + ?Sized, |
| 756 | { |
| 757 | type Target = T; |
#[inline]
| 759 | fn deref(&self) -> &T { |
| 760 | // Presumably unreachable, since we've guarded each constructor of `Ref`. |
| 761 | static_assert_dst_is_not_zst!(T); |
| 762 | |
| 763 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 764 | // `ByteSlice`. |
let b = unsafe { self.as_byte_slice() };
| 766 | |
| 767 | // PANICS: By postcondition on `as_byte_slice`, `b`'s size and alignment |
| 768 | // are valid for `T`, and by invariant on `ByteSlice`, these are |
| 769 | // preserved through `.deref()`, so this `unwrap` will not panic. |
let ptr = Ptr::from_ref(b.deref())
.try_cast_into_no_leftover::<T, BecauseImmutable>(None)
.expect("zerocopy internal error: Deref::deref should be infallible");
let ptr = ptr.recall_validity();
| 774 | ptr.as_ref() |
| 775 | } |
| 776 | } |
| 777 | |
| 778 | impl<B, T> DerefMut for Ref<B, T> |
| 779 | where |
| 780 | B: ByteSliceMut, |
| 781 | // TODO(#251): We can't remove `Immutable` here because it's required by |
| 782 | // the impl of `Deref`, which is a super-trait of `DerefMut`. Maybe we can |
| 783 | // add a separate inherent method for this? |
| 784 | T: FromBytes + IntoBytes + KnownLayout + Immutable + ?Sized, |
| 785 | { |
#[inline]
| 787 | fn deref_mut(&mut self) -> &mut T { |
| 788 | // Presumably unreachable, since we've guarded each constructor of `Ref`. |
| 789 | static_assert_dst_is_not_zst!(T); |
| 790 | |
| 791 | // SAFETY: We don't call any methods on `b` other than those provided by |
| 792 | // `ByteSliceMut`. |
let b = unsafe { self.as_byte_slice_mut() };
| 794 | |
| 795 | // PANICS: By postcondition on `as_byte_slice_mut`, `b`'s size and |
| 796 | // alignment are valid for `T`, and by invariant on `ByteSlice`, these |
| 797 | // are preserved through `.deref_mut()`, so this `unwrap` will not |
| 798 | // panic. |
let ptr = Ptr::from_mut(b.deref_mut())
.try_cast_into_no_leftover::<T, BecauseExclusive>(None)
.expect("zerocopy internal error: DerefMut::deref_mut should be infallible");
let ptr = ptr.recall_validity::<_, (_, (_, (BecauseExclusive, BecauseExclusive)))>();
| 803 | ptr.as_mut() |
| 804 | } |
| 805 | } |
| 806 | |
| 807 | impl<T, B> Display for Ref<B, T> |
| 808 | where |
| 809 | B: ByteSlice, |
| 810 | T: FromBytes + Display + KnownLayout + Immutable + ?Sized, |
| 811 | { |
#[inline]
| 813 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
| 814 | let inner: &T = self; |
| 815 | inner.fmt(fmt) |
| 816 | } |
| 817 | } |
| 818 | |
| 819 | impl<T, B> Debug for Ref<B, T> |
| 820 | where |
| 821 | B: ByteSlice, |
| 822 | T: FromBytes + Debug + KnownLayout + Immutable + ?Sized, |
| 823 | { |
#[inline]
| 825 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { |
| 826 | let inner: &T = self; |
fmt.debug_tuple("Ref").field(&inner).finish()
| 828 | } |
| 829 | } |
| 830 | |
| 831 | impl<T, B> Eq for Ref<B, T> |
| 832 | where |
| 833 | B: ByteSlice, |
| 834 | T: FromBytes + Eq + KnownLayout + Immutable + ?Sized, |
| 835 | { |
| 836 | } |
| 837 | |
| 838 | impl<T, B> PartialEq for Ref<B, T> |
| 839 | where |
| 840 | B: ByteSlice, |
| 841 | T: FromBytes + PartialEq + KnownLayout + Immutable + ?Sized, |
| 842 | { |
#[inline]
| 844 | fn eq(&self, other: &Self) -> bool { |
| 845 | self.deref().eq(other.deref()) |
| 846 | } |
| 847 | } |
| 848 | |
| 849 | impl<T, B> Ord for Ref<B, T> |
| 850 | where |
| 851 | B: ByteSlice, |
| 852 | T: FromBytes + Ord + KnownLayout + Immutable + ?Sized, |
| 853 | { |
#[inline]
| 855 | fn cmp(&self, other: &Self) -> Ordering { |
| 856 | let inner: &T = self; |
| 857 | let other_inner: &T = other; |
| 858 | inner.cmp(other_inner) |
| 859 | } |
| 860 | } |
| 861 | |
| 862 | impl<T, B> PartialOrd for Ref<B, T> |
| 863 | where |
| 864 | B: ByteSlice, |
| 865 | T: FromBytes + PartialOrd + KnownLayout + Immutable + ?Sized, |
| 866 | { |
#[inline]
| 868 | fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
| 869 | let inner: &T = self; |
| 870 | let other_inner: &T = other; |
| 871 | inner.partial_cmp(other_inner) |
| 872 | } |
| 873 | } |
| 874 | |
#[cfg(test)]
#[allow(clippy::assertions_on_result_states)]
| 877 | mod tests { |
| 878 | use core::convert::TryInto as _; |
| 879 | |
| 880 | use super::*; |
| 881 | use crate::util::testutil::*; |
| 882 | |
#[test]
| 884 | fn test_mut_slice_into_ref() { |
| 885 | // Prior to #1260/#1299, calling `into_ref` on a `&mut [u8]`-backed |
// `Ref` was not supported.
| 887 | let mut buf = [0u8]; |
| 888 | let r = Ref::<&mut [u8], u8>::from_bytes(&mut buf).unwrap(); |
| 889 | assert_eq!(Ref::into_ref(r), &0); |
| 890 | } |
| 891 | |
#[test]
| 893 | fn test_address() { |
| 894 | // Test that the `Deref` and `DerefMut` implementations return a |
| 895 | // reference which points to the right region of memory. |
| 896 | |
| 897 | let buf = [0]; |
| 898 | let r = Ref::<_, u8>::from_bytes(&buf[..]).unwrap(); |
| 899 | let buf_ptr = buf.as_ptr(); |
| 900 | let deref_ptr: *const u8 = r.deref(); |
| 901 | assert_eq!(buf_ptr, deref_ptr); |
| 902 | |
| 903 | let buf = [0]; |
| 904 | let r = Ref::<_, [u8]>::from_bytes(&buf[..]).unwrap(); |
| 905 | let buf_ptr = buf.as_ptr(); |
| 906 | let deref_ptr = r.deref().as_ptr(); |
| 907 | assert_eq!(buf_ptr, deref_ptr); |
| 908 | } |
| 909 | |
| 910 | // Verify that values written to a `Ref` are properly shared between the |
| 911 | // typed and untyped representations, that reads via `deref` and `read` |
| 912 | // behave the same, and that writes via `deref_mut` and `write` behave the |
| 913 | // same. |
| 914 | fn test_new_helper(mut r: Ref<&mut [u8], AU64>) { |
| 915 | // assert that the value starts at 0 |
| 916 | assert_eq!(*r, AU64(0)); |
| 917 | assert_eq!(Ref::read(&r), AU64(0)); |
| 918 | |
| 919 | // Assert that values written to the typed value are reflected in the |
| 920 | // byte slice. |
| 921 | const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); |
| 922 | *r = VAL1; |
| 923 | assert_eq!(Ref::bytes(&r), &VAL1.to_bytes()); |
| 924 | *r = AU64(0); |
| 925 | Ref::write(&mut r, VAL1); |
| 926 | assert_eq!(Ref::bytes(&r), &VAL1.to_bytes()); |
| 927 | |
| 928 | // Assert that values written to the byte slice are reflected in the |
| 929 | // typed value. |
| 930 | const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1` |
| 931 | Ref::bytes_mut(&mut r).copy_from_slice(&VAL2.to_bytes()[..]); |
| 932 | assert_eq!(*r, VAL2); |
| 933 | assert_eq!(Ref::read(&r), VAL2); |
| 934 | } |
| 935 | |
| 936 | // Verify that values written to a `Ref` are properly shared between the |
| 937 | // typed and untyped representations; pass a value with `typed_len` `AU64`s |
| 938 | // backed by an array of `typed_len * 8` bytes. |
| 939 | fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) { |
| 940 | // Assert that the value starts out zeroed. |
| 941 | assert_eq!(&*r, vec![AU64(0); typed_len].as_slice()); |
| 942 | |
| 943 | // Check the backing storage is the exact same slice. |
| 944 | let untyped_len = typed_len * 8; |
| 945 | assert_eq!(Ref::bytes(&r).len(), untyped_len); |
| 946 | assert_eq!(Ref::bytes(&r).as_ptr(), r.as_ptr().cast::<u8>()); |
| 947 | |
| 948 | // Assert that values written to the typed value are reflected in the |
| 949 | // byte slice. |
| 950 | const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); |
| 951 | for typed in &mut *r { |
| 952 | *typed = VAL1; |
| 953 | } |
| 954 | assert_eq!(Ref::bytes(&r), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); |
| 955 | |
| 956 | // Assert that values written to the byte slice are reflected in the |
| 957 | // typed value. |
| 958 | const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 |
| 959 | Ref::bytes_mut(&mut r).copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); |
| 960 | assert!(r.iter().copied().all(|x| x == VAL2)); |
| 961 | } |
| 962 | |
#[test]
fn test_new_aligned_sized() {
// Test that a properly-aligned, properly-sized buffer works for
// `from_bytes`, `from_prefix`, and `from_suffix`, and that `from_prefix`
// and `from_suffix` return empty slices. Test that a properly-aligned
// buffer whose length is a multiple of the element size works when the
// target type is a slice.
| 970 | |
| 971 | // A buffer with an alignment of 8. |
| 972 | let mut buf = Align::<[u8; 8], AU64>::default(); |
| 973 | // `buf.t` should be aligned to 8, so this should always succeed. |
| 974 | test_new_helper(Ref::<_, AU64>::from_bytes(&mut buf.t[..]).unwrap()); |
| 975 | { |
| 976 | // In a block so that `r` and `suffix` don't live too long. |
| 977 | buf.set_default(); |
| 978 | let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap(); |
| 979 | assert!(suffix.is_empty()); |
| 980 | test_new_helper(r); |
| 981 | } |
| 982 | { |
| 983 | buf.set_default(); |
| 984 | let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap(); |
| 985 | assert!(prefix.is_empty()); |
| 986 | test_new_helper(r); |
| 987 | } |
| 988 | |
| 989 | // A buffer with alignment 8 and length 24. We choose this length very |
| 990 | // intentionally: if we instead used length 16, then the prefix and |
| 991 | // suffix lengths would be identical. In the past, we used length 16, |
| 992 | // which resulted in this test failing to discover the bug uncovered in |
| 993 | // #506. |
| 994 | let mut buf = Align::<[u8; 24], AU64>::default(); |
| 995 | // `buf.t` should be aligned to 8 and have a length which is a multiple |
| 996 | // of `size_of::<AU64>()`, so this should always succeed. |
| 997 | test_new_helper_slice(Ref::<_, [AU64]>::from_bytes(&mut buf.t[..]).unwrap(), 3); |
| 998 | buf.set_default(); |
| 999 | let r = Ref::<_, [AU64]>::from_bytes_with_elems(&mut buf.t[..], 3).unwrap(); |
| 1000 | test_new_helper_slice(r, 3); |
| 1001 | |
| 1002 | let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap(); |
| 1003 | // 16 ascending bytes followed by 8 zeros. |
| 1004 | let mut ascending_prefix = ascending; |
| 1005 | ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); |
| 1006 | // 8 zeros followed by 16 ascending bytes. |
| 1007 | let mut ascending_suffix = ascending; |
| 1008 | ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); |
| 1009 | { |
| 1010 | buf.t = ascending_suffix; |
| 1011 | let (r, suffix) = Ref::<_, [AU64]>::from_prefix_with_elems(&mut buf.t[..], 1).unwrap(); |
| 1012 | assert_eq!(suffix, &ascending[8..]); |
| 1013 | test_new_helper_slice(r, 1); |
| 1014 | } |
| 1015 | { |
| 1016 | buf.t = ascending_prefix; |
| 1017 | let (prefix, r) = Ref::<_, [AU64]>::from_suffix_with_elems(&mut buf.t[..], 1).unwrap(); |
| 1018 | assert_eq!(prefix, &ascending[..16]); |
| 1019 | test_new_helper_slice(r, 1); |
| 1020 | } |
| 1021 | } |
| 1022 | |
#[test]
fn test_new_oversized() {
// Test that a properly-aligned, overly-sized buffer works for
// `from_prefix` and `from_suffix`, and that they return the remainder
// and prefix of the slice respectively.
| 1028 | |
| 1029 | let mut buf = Align::<[u8; 16], AU64>::default(); |
| 1030 | { |
| 1031 | // In a block so that `r` and `suffix` don't live too long. `buf.t` |
| 1032 | // should be aligned to 8, so this should always succeed. |
| 1033 | let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap(); |
| 1034 | assert_eq!(suffix.len(), 8); |
| 1035 | test_new_helper(r); |
| 1036 | } |
| 1037 | { |
| 1038 | buf.set_default(); |
| 1039 | // `buf.t` should be aligned to 8, so this should always succeed. |
| 1040 | let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap(); |
| 1041 | assert_eq!(prefix.len(), 8); |
| 1042 | test_new_helper(r); |
| 1043 | } |
| 1044 | } |
| 1045 | |
#[test]
#[allow(clippy::cognitive_complexity)]
| 1048 | fn test_new_error() { |
| 1049 | // Fail because the buffer is too large. |
| 1050 | |
| 1051 | // A buffer with an alignment of 8. |
| 1052 | let buf = Align::<[u8; 16], AU64>::default(); |
| 1053 | // `buf.t` should be aligned to 8, so only the length check should fail. |
| 1054 | assert!(Ref::<_, AU64>::from_bytes(&buf.t[..]).is_err()); |
| 1055 | |
| 1056 | // Fail because the buffer is too small. |
| 1057 | |
| 1058 | // A buffer with an alignment of 8. |
| 1059 | let buf = Align::<[u8; 4], AU64>::default(); |
| 1060 | // `buf.t` should be aligned to 8, so only the length check should fail. |
| 1061 | assert!(Ref::<_, AU64>::from_bytes(&buf.t[..]).is_err()); |
| 1062 | assert!(Ref::<_, AU64>::from_prefix(&buf.t[..]).is_err()); |
| 1063 | assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); |
| 1064 | |
| 1065 | // Fail because the length is not a multiple of the element size. |
| 1066 | |
| 1067 | let buf = Align::<[u8; 12], AU64>::default(); |
| 1068 | // `buf.t` has length 12, but element size is 8. |
| 1069 | assert!(Ref::<_, [AU64]>::from_bytes(&buf.t[..]).is_err()); |
| 1070 | |
| 1071 | // Fail because the buffer is too short. |
| 1072 | let buf = Align::<[u8; 12], AU64>::default(); |
| 1073 | // `buf.t` has length 12, but the element size is 8 (and we're expecting |
| 1074 | // two of them). For each function, we test with a length that would |
| 1075 | // cause the size to overflow `usize`, and with a normal length that |
| 1076 | // will fail thanks to the buffer being too short; these are different |
| 1077 | // error paths, and while the error types are the same, the distinction |
| 1078 | // shows up in code coverage metrics. |
| 1079 | let n = (usize::MAX / mem::size_of::<AU64>()) + 1; |
| 1080 | assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[..], n).is_err()); |
| 1081 | assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[..], 2).is_err()); |
| 1082 | assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], n).is_err()); |
| 1083 | assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], 2).is_err()); |
| 1084 | assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], n).is_err()); |
| 1085 | assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], 2).is_err()); |
| 1086 | |
| 1087 | // Fail because the alignment is insufficient. |
| 1088 | |
| 1089 | // A buffer with an alignment of 8. An odd buffer size is chosen so that |
| 1090 | // the last byte of the buffer has odd alignment. |
| 1091 | let buf = Align::<[u8; 13], AU64>::default(); |
| 1092 | // Slicing from 1, we get a buffer with size 12 (so the length check |
| 1093 | // should succeed) but an alignment of only 1, which is insufficient. |
| 1094 | assert!(Ref::<_, AU64>::from_bytes(&buf.t[1..]).is_err()); |
| 1095 | assert!(Ref::<_, AU64>::from_prefix(&buf.t[1..]).is_err()); |
| 1096 | assert!(Ref::<_, [AU64]>::from_bytes(&buf.t[1..]).is_err()); |
| 1097 | assert!(Ref::<_, [AU64]>::from_bytes_with_elems(&buf.t[1..], 1).is_err()); |
| 1098 | assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[1..], 1).is_err()); |
| 1099 | assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[1..], 1).is_err()); |
// Slicing is unnecessary here because `from_suffix` uses the suffix
// of the slice, which has odd alignment.
| 1102 | assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); |
| 1103 | |
| 1104 | // Fail due to arithmetic overflow. |
| 1105 | |
| 1106 | let buf = Align::<[u8; 16], AU64>::default(); |
| 1107 | let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1; |
| 1108 | assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], unreasonable_len).is_err()); |
| 1109 | assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], unreasonable_len).is_err()); |
| 1110 | } |
| 1111 | |
#[test]
#[allow(unstable_name_collisions)]
#[allow(clippy::as_conversions)]
| 1115 | fn test_into_ref_mut() { |
#[allow(unused)]
| 1117 | use crate::util::AsAddress as _; |
| 1118 | |
| 1119 | let mut buf = Align::<[u8; 8], u64>::default(); |
| 1120 | let r = Ref::<_, u64>::from_bytes(&buf.t[..]).unwrap(); |
| 1121 | let rf = Ref::into_ref(r); |
| 1122 | assert_eq!(rf, &0u64); |
| 1123 | let buf_addr = (&buf.t as *const [u8; 8]).addr(); |
| 1124 | assert_eq!((rf as *const u64).addr(), buf_addr); |
| 1125 | |
| 1126 | let r = Ref::<_, u64>::from_bytes(&mut buf.t[..]).unwrap(); |
| 1127 | let rf = Ref::into_mut(r); |
| 1128 | assert_eq!(rf, &mut 0u64); |
| 1129 | assert_eq!((rf as *mut u64).addr(), buf_addr); |
| 1130 | |
| 1131 | *rf = u64::MAX; |
| 1132 | assert_eq!(buf.t, [0xFF; 8]); |
| 1133 | } |
| 1134 | |
#[test]
| 1136 | fn test_display_debug() { |
| 1137 | let buf = Align::<[u8; 8], u64>::default(); |
| 1138 | let r = Ref::<_, u64>::from_bytes(&buf.t[..]).unwrap(); |
assert_eq!(format!("{}", r), "0");
assert_eq!(format!("{:?}", r), "Ref(0)");
| 1141 | |
| 1142 | let buf = Align::<[u8; 8], u64>::default(); |
| 1143 | let r = Ref::<_, [u64]>::from_bytes(&buf.t[..]).unwrap(); |
assert_eq!(format!("{:?}", r), "Ref([0])");
| 1145 | } |
| 1146 | |
#[test]
| 1148 | fn test_eq() { |
| 1149 | let buf1 = 0_u64; |
| 1150 | let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); |
| 1151 | let buf2 = 0_u64; |
| 1152 | let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); |
| 1153 | assert_eq!(r1, r2); |
| 1154 | } |
| 1155 | |
#[test]
| 1157 | fn test_ne() { |
| 1158 | let buf1 = 0_u64; |
| 1159 | let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); |
| 1160 | let buf2 = 1_u64; |
| 1161 | let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); |
| 1162 | assert_ne!(r1, r2); |
| 1163 | } |
| 1164 | |
#[test]
| 1166 | fn test_ord() { |
| 1167 | let buf1 = 0_u64; |
| 1168 | let r1 = Ref::<_, u64>::from_bytes(buf1.as_bytes()).unwrap(); |
| 1169 | let buf2 = 1_u64; |
| 1170 | let r2 = Ref::<_, u64>::from_bytes(buf2.as_bytes()).unwrap(); |
| 1171 | assert!(r1 < r2); |
| 1172 | assert_eq!(PartialOrd::partial_cmp(&r1, &r2), Some(Ordering::Less)); |
| 1173 | assert_eq!(Ord::cmp(&r1, &r2), Ordering::Less); |
| 1174 | } |
| 1175 | } |
| 1176 | |