// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The `BlockRngCore` trait and implementation helpers
//!
//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs
//! which generate a block of data in a cache instead of returning generated
//! values directly.
//!
//! Usage of this trait is optional, but provides two advantages:
//! implementations only need to concern themselves with generation of the
//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
//! the optimal implementations are not trivial), and this allows
//! `ReseedingRng` (see the [`rand`](https://docs.rs/rand) crate) to perform
//! periodic reseeding with very low overhead.
//!
//! # Example
//!
//! ```no_run
//! use rand_core::{RngCore, SeedableRng};
//! use rand_core::block::{BlockRngCore, BlockRng};
//!
//! struct MyRngCore;
//!
//! impl BlockRngCore for MyRngCore {
//!     type Item = u32;
//!     type Results = [u32; 16];
//!
//!     fn generate(&mut self, results: &mut Self::Results) {
//!         unimplemented!()
//!     }
//! }
//!
//! impl SeedableRng for MyRngCore {
//!     type Seed = [u8; 32];
//!     fn from_seed(seed: Self::Seed) -> Self {
//!         unimplemented!()
//!     }
//! }
//!
//! // optionally, also implement CryptoRng for MyRngCore
//!
//! // Final RNG.
//! let mut rng = BlockRng::<MyRngCore>::seed_from_u64(0);
//! println!("First value: {}", rng.next_u32());
//! ```
//!
//! [`BlockRngCore`]: crate::block::BlockRngCore
//! [`fill_bytes`]: RngCore::fill_bytes

use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks};
use crate::{CryptoRng, Error, RngCore, SeedableRng};
use core::convert::AsRef;
use core::fmt;
#[cfg(feature = "serde1")]
use serde::{Deserialize, Serialize};

/// A trait for RNGs which do not generate random numbers individually, but in
/// blocks (typically `[u32; N]`). This technique is commonly used by
/// cryptographic RNGs to improve performance.
///
/// See the [module][crate::block] documentation for details.
pub trait BlockRngCore {
    /// Results element type, e.g. `u32`.
    type Item;

    /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
    /// generates, which will usually be an array like `[u32; 16]`.
    type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;

    /// Generate a new block of results.
    fn generate(&mut self, results: &mut Self::Results);
}

/// A wrapper type implementing [`RngCore`] for some type implementing
/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement
/// a full RNG from just a `generate` function.
///
/// The `core` field may be accessed directly but the results buffer may not.
/// PRNG implementations can simply use a type alias
/// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a
/// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must
/// re-implement `RngCore` but hides the implementation details and allows
/// extra functionality to be defined on the RNG
/// (e.g. `impl MyRng { fn set_stream(...){...} }`).
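///
/// A minimal sketch of the wrapper-type pattern described above, assuming a
/// `MyRngCore` type like the one in the module-level example:
///
/// ```ignore
/// use rand_core::{Error, RngCore};
/// use rand_core::block::BlockRng;
///
/// pub struct MyRng(BlockRng<MyRngCore>);
///
/// impl RngCore for MyRng {
///     fn next_u32(&mut self) -> u32 { self.0.next_u32() }
///     fn next_u64(&mut self) -> u64 { self.0.next_u64() }
///     fn fill_bytes(&mut self, dest: &mut [u8]) { self.0.fill_bytes(dest) }
///     fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
///         self.0.try_fill_bytes(dest)
///     }
/// }
/// ```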
///
/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
/// reading values from the results buffer, as well as
/// calling [`BlockRngCore::generate`] directly on the output array when
/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
/// also handle the bookkeeping of when to generate a new batch of values.
///
/// No whole generated `u32` values are thrown away and all values are consumed
/// in-order. [`next_u32`] simply takes the next available `u32` value.
/// [`next_u64`] is implemented by combining two `u32` values, least
/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
/// number of `u32` values, converting each `u32` to a byte slice in
/// little-endian order. If the requested byte length is not a multiple of 4,
/// some bytes will be discarded.
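///
/// A minimal sketch of this consumption order, using a hypothetical counting
/// core (not part of this crate) purely for illustration:
///
/// ```
/// use rand_core::RngCore;
/// use rand_core::block::{BlockRng, BlockRngCore};
///
/// struct CountingCore(u32);
///
/// impl BlockRngCore for CountingCore {
///     type Item = u32;
///     type Results = [u32; 4];
///     fn generate(&mut self, results: &mut Self::Results) {
///         // Fill the block with consecutive counter values.
///         for r in results.iter_mut() {
///             *r = self.0;
///             self.0 = self.0.wrapping_add(1);
///         }
///     }
/// }
///
/// let mut rng = BlockRng::new(CountingCore(0));
/// // The first generated block is [0, 1, 2, 3].
/// assert_eq!(rng.next_u32(), 0);
/// // `next_u64` combines the next two `u32` values, least significant first.
/// assert_eq!(rng.next_u64(), (2u64 << 32) | 1);
/// ```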
///
/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is
/// no direct support for other buffer types.
///
/// For easy initialization `BlockRng` also implements [`SeedableRng`].
///
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serde1",
    serde(
        bound = "for<'x> R: Serialize + Deserialize<'x> + Sized, for<'x> R::Results: Serialize + Deserialize<'x>"
    )
)]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
    results: R::Results,
    index: usize,
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}

// Custom Debug implementation that does not expose the contents of `results`.
impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BlockRng")
            .field("core", &self.core)
            .field("result_len", &self.results.as_ref().len())
            .field("index", &self.index)
            .finish()
    }
}

impl<R: BlockRngCore> BlockRng<R> {
    /// Create a new `BlockRng` from an existing RNG implementing
    /// `BlockRngCore`. Results will be generated on first use.
    #[inline]
    pub fn new(core: R) -> BlockRng<R> {
        let results_empty = R::Results::default();
        BlockRng {
            core,
            index: results_empty.as_ref().len(),
            results: results_empty,
        }
    }

    /// Get the index into the result buffer.
    ///
    /// If this is equal to or larger than the size of the result buffer then
    /// the buffer is "empty" and `generate()` must be called to produce new
    /// results.
    #[inline(always)]
    pub fn index(&self) -> usize {
        self.index
    }

    /// Reset the number of available results.
    /// This will force a new set of results to be generated on next use.
    #[inline]
    pub fn reset(&mut self) {
        self.index = self.results.as_ref().len();
    }

    /// Generate a new set of results immediately, setting the index to the
    /// given value.
    #[inline]
    pub fn generate_and_set(&mut self, index: usize) {
        assert!(index < self.results.as_ref().len());
        self.core.generate(&mut self.results);
        self.index = index;
    }
}

impl<R: BlockRngCore<Item = u32>> RngCore for BlockRng<R>
where
    <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
{
    #[inline]
    fn next_u32(&mut self) -> u32 {
        if self.index >= self.results.as_ref().len() {
            self.generate_and_set(0);
        }

        let value = self.results.as_ref()[self.index];
        self.index += 1;
        value
    }

    #[inline]
    fn next_u64(&mut self) -> u64 {
        let read_u64 = |results: &[u32], index| {
            let data = &results[index..=index + 1];
            u64::from(data[1]) << 32 | u64::from(data[0])
        };

        let len = self.results.as_ref().len();

        let index = self.index;
        if index < len - 1 {
            self.index += 2;
            // Read a u64 from the current index
            read_u64(self.results.as_ref(), index)
        } else if index >= len {
            // Buffer is empty: refill it and consume its first two values.
            self.generate_and_set(2);
            read_u64(self.results.as_ref(), 0)
        } else {
            // Only one u32 is left: combine it with the first value of a
            // freshly generated block, least significant half first.
            let x = u64::from(self.results.as_ref()[len - 1]);
            self.generate_and_set(1);
            let y = u64::from(self.results.as_ref()[0]);
            (y << 32) | x
        }
    }

    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let mut read_len = 0;
        while read_len < dest.len() {
            if self.index >= self.results.as_ref().len() {
                self.generate_and_set(0);
            }
            let (consumed_u32, filled_u8) =
                fill_via_u32_chunks(&self.results.as_ref()[self.index..], &mut dest[read_len..]);

            self.index += consumed_u32;
            read_len += filled_u8;
        }
    }

    #[inline(always)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}

impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
    type Seed = R::Seed;

    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        Self::new(R::from_seed(seed))
    }

    #[inline(always)]
    fn seed_from_u64(seed: u64) -> Self {
        Self::new(R::seed_from_u64(seed))
    }

    #[inline(always)]
    fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
        Ok(Self::new(R::from_rng(rng)?))
    }
}

/// A wrapper type implementing [`RngCore`] for some type implementing
/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement
/// a full RNG from just a `generate` function.
///
/// This is similar to [`BlockRng`], but specialized for algorithms that operate
/// on `u64` values.
///
/// No whole generated `u64` values are thrown away and all values are consumed
/// in-order. [`next_u64`] simply takes the next available `u64` value.
/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving
/// the other half in the buffer. If the next call is [`next_u32`], that other
/// half is consumed; however, both [`next_u64`] and [`fill_bytes`] discard the
/// rest of any half-consumed `u64` when called.
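///
/// A minimal sketch of this half-consumption behaviour, using a hypothetical
/// counting core (not part of this crate) purely for illustration:
///
/// ```
/// use rand_core::RngCore;
/// use rand_core::block::{BlockRng64, BlockRngCore};
///
/// struct CountingCore64(u64);
///
/// impl BlockRngCore for CountingCore64 {
///     type Item = u64;
///     type Results = [u64; 4];
///     fn generate(&mut self, results: &mut Self::Results) {
///         // Fill the block with consecutive counter values.
///         for r in results.iter_mut() {
///             *r = self.0;
///             self.0 = self.0.wrapping_add(1);
///         }
///     }
/// }
///
/// let mut rng = BlockRng64::new(CountingCore64(0x1111_2222_3333_4444));
/// // The first `u64` in the buffer is 0x1111_2222_3333_4444.
/// assert_eq!(rng.next_u32(), 0x3333_4444); // lower half
/// assert_eq!(rng.next_u32(), 0x1111_2222); // upper half of the same value
/// // `next_u64` then starts at the next whole `u64`.
/// assert_eq!(rng.next_u64(), 0x1111_2222_3333_4445);
/// ```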
///
/// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64`
/// values. If the requested length is not a multiple of 8, some bytes will be
/// discarded.
///
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct BlockRng64<R: BlockRngCore + ?Sized> {
    results: R::Results,
    index: usize,
    half_used: bool, // true if only half of the previous result is used
    /// The *core* part of the RNG, implementing the `generate` function.
    pub core: R,
}

// Custom Debug implementation that does not expose the contents of `results`.
impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BlockRng64")
            .field("core", &self.core)
            .field("result_len", &self.results.as_ref().len())
            .field("index", &self.index)
            .field("half_used", &self.half_used)
            .finish()
    }
}

impl<R: BlockRngCore> BlockRng64<R> {
    /// Create a new `BlockRng64` from an existing RNG implementing
    /// `BlockRngCore`. Results will be generated on first use.
    #[inline]
    pub fn new(core: R) -> BlockRng64<R> {
        let results_empty = R::Results::default();
        BlockRng64 {
            core,
            index: results_empty.as_ref().len(),
            half_used: false,
            results: results_empty,
        }
    }

    /// Get the index into the result buffer.
    ///
    /// If this is equal to or larger than the size of the result buffer then
    /// the buffer is "empty" and `generate()` must be called to produce new
    /// results.
    #[inline(always)]
    pub fn index(&self) -> usize {
        self.index
    }

    /// Reset the number of available results.
    /// This will force a new set of results to be generated on next use.
    #[inline]
    pub fn reset(&mut self) {
        self.index = self.results.as_ref().len();
        self.half_used = false;
    }

    /// Generate a new set of results immediately, setting the index to the
    /// given value.
    #[inline]
    pub fn generate_and_set(&mut self, index: usize) {
        assert!(index < self.results.as_ref().len());
        self.core.generate(&mut self.results);
        self.index = index;
        self.half_used = false;
    }
}

impl<R: BlockRngCore<Item = u64>> RngCore for BlockRng64<R>
where
    <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>,
{
    #[inline]
    fn next_u32(&mut self) -> u32 {
        let mut index = self.index - self.half_used as usize;
        if index >= self.results.as_ref().len() {
            self.core.generate(&mut self.results);
            self.index = 0;
            index = 0;
            // `self.half_used` is by definition `false`
            self.half_used = false;
        }

        // Take the lower half of a fresh `u64`, or the upper half if the lower
        // half was already consumed by a previous `next_u32` call.
        let shift = 32 * (self.half_used as usize);

        self.half_used = !self.half_used;
        self.index += self.half_used as usize;

        (self.results.as_ref()[index] >> shift) as u32
    }

    #[inline]
    fn next_u64(&mut self) -> u64 {
        if self.index >= self.results.as_ref().len() {
            self.core.generate(&mut self.results);
            self.index = 0;
        }

        let value = self.results.as_ref()[self.index];
        self.index += 1;
        self.half_used = false;
        value
    }

    #[inline]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let mut read_len = 0;
        // Any half-consumed `u64` is discarded; only whole values are used.
        self.half_used = false;
        while read_len < dest.len() {
            if self.index as usize >= self.results.as_ref().len() {
                self.core.generate(&mut self.results);
                self.index = 0;
            }

            let (consumed_u64, filled_u8) = fill_via_u64_chunks(
                &self.results.as_ref()[self.index as usize..],
                &mut dest[read_len..],
            );

            self.index += consumed_u64;
            read_len += filled_u8;
        }
    }

    #[inline(always)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}

impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
    type Seed = R::Seed;

    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        Self::new(R::from_seed(seed))
    }

    #[inline(always)]
    fn seed_from_u64(seed: u64) -> Self {
        Self::new(R::seed_from_u64(seed))
    }

    #[inline(always)]
    fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
        Ok(Self::new(R::from_rng(rng)?))
    }
}

impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}

#[cfg(test)]
mod test {
    use crate::{SeedableRng, RngCore};
    use crate::block::{BlockRng, BlockRng64, BlockRngCore};

    #[derive(Debug, Clone)]
    struct DummyRng {
        counter: u32,
    }

    impl BlockRngCore for DummyRng {
        type Item = u32;

        type Results = [u32; 16];

        fn generate(&mut self, results: &mut Self::Results) {
            for r in results {
                *r = self.counter;
                self.counter = self.counter.wrapping_add(3511615421);
            }
        }
    }

    impl SeedableRng for DummyRng {
        type Seed = [u8; 4];

        fn from_seed(seed: Self::Seed) -> Self {
            DummyRng { counter: u32::from_le_bytes(seed) }
        }
    }

    #[test]
    fn blockrng_next_u32_vs_next_u64() {
        let mut rng1 = BlockRng::<DummyRng>::from_seed([1, 2, 3, 4]);
        let mut rng2 = rng1.clone();
        let mut rng3 = rng1.clone();

        let mut a = [0; 16];
        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());

        let mut b = [0; 16];
        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
        assert_eq!(a, b);

        let mut c = [0; 16];
        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        assert_eq!(a, c);
    }

    #[derive(Debug, Clone)]
    struct DummyRng64 {
        counter: u64,
    }

    impl BlockRngCore for DummyRng64 {
        type Item = u64;

        type Results = [u64; 8];

        fn generate(&mut self, results: &mut Self::Results) {
            for r in results {
                *r = self.counter;
                self.counter = self.counter.wrapping_add(2781463553396133981);
            }
        }
    }

    impl SeedableRng for DummyRng64 {
        type Seed = [u8; 8];

        fn from_seed(seed: Self::Seed) -> Self {
            DummyRng64 { counter: u64::from_le_bytes(seed) }
        }
    }

    #[test]
    fn blockrng64_next_u32_vs_next_u64() {
        let mut rng1 = BlockRng64::<DummyRng64>::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
        let mut rng2 = rng1.clone();
        let mut rng3 = rng1.clone();

        let mut a = [0; 16];
        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());

        let mut b = [0; 16];
        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
        assert_ne!(a, b);
        assert_eq!(&a[..4], &b[..4]);
        assert_eq!(&a[4..12], &b[8..]);

        let mut c = [0; 16];
        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
        assert_eq!(b, c);
    }
}
| 540 | |