1 | // Copyright 2018 Developers of the Rand project. |
2 | // |
3 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
4 | // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
5 | // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your |
6 | // option. This file may not be copied, modified, or distributed |
7 | // except according to those terms. |
8 | |
9 | //! The `BlockRngCore` trait and implementation helpers |
10 | //! |
11 | //! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs |
12 | //! which generate a block of data in a cache instead of returning generated |
13 | //! values directly. |
14 | //! |
15 | //! Usage of this trait is optional, but provides two advantages: |
16 | //! implementations only need to concern themselves with generation of the |
17 | //! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where |
//! the optimal implementations are not trivial), and this allows
//! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) to perform
//! periodic reseeding with very low overhead.
21 | //! |
22 | //! # Example |
23 | //! |
24 | //! ```no_run |
25 | //! use rand_core::{RngCore, SeedableRng}; |
26 | //! use rand_core::block::{BlockRngCore, BlockRng}; |
27 | //! |
28 | //! struct MyRngCore; |
29 | //! |
30 | //! impl BlockRngCore for MyRngCore { |
31 | //! type Item = u32; |
32 | //! type Results = [u32; 16]; |
33 | //! |
34 | //! fn generate(&mut self, results: &mut Self::Results) { |
35 | //! unimplemented!() |
36 | //! } |
37 | //! } |
38 | //! |
39 | //! impl SeedableRng for MyRngCore { |
40 | //! type Seed = [u8; 32]; |
41 | //! fn from_seed(seed: Self::Seed) -> Self { |
42 | //! unimplemented!() |
43 | //! } |
44 | //! } |
45 | //! |
46 | //! // optionally, also implement CryptoBlockRng for MyRngCore |
47 | //! |
48 | //! // Final RNG. |
49 | //! let mut rng = BlockRng::<MyRngCore>::seed_from_u64(0); |
//! println!("First value: {}", rng.next_u32());
51 | //! ``` |
52 | //! |
53 | //! [`BlockRngCore`]: crate::block::BlockRngCore |
54 | //! [`fill_bytes`]: RngCore::fill_bytes |
55 | |
56 | use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks}; |
57 | use crate::{CryptoRng, RngCore, SeedableRng, TryRngCore}; |
58 | use core::fmt; |
#[cfg(feature = "serde")]
60 | use serde::{Deserialize, Serialize}; |
61 | |
62 | /// A trait for RNGs which do not generate random numbers individually, but in |
63 | /// blocks (typically `[u32; N]`). This technique is commonly used by |
64 | /// cryptographic RNGs to improve performance. |
65 | /// |
66 | /// See the [module][crate::block] documentation for details. |
67 | pub trait BlockRngCore { |
68 | /// Results element type, e.g. `u32`. |
69 | type Item; |
70 | |
71 | /// Results type. This is the 'block' an RNG implementing `BlockRngCore` |
72 | /// generates, which will usually be an array like `[u32; 16]`. |
73 | type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default; |
74 | |
75 | /// Generate a new block of results. |
76 | fn generate(&mut self, results: &mut Self::Results); |
77 | } |
78 | |
79 | /// A marker trait used to indicate that an [`RngCore`] implementation is |
80 | /// supposed to be cryptographically secure. |
81 | /// |
82 | /// See [`CryptoRng`] docs for more information. |
83 | pub trait CryptoBlockRng: BlockRngCore {} |
84 | |
85 | /// A wrapper type implementing [`RngCore`] for some type implementing |
86 | /// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement |
87 | /// a full RNG from just a `generate` function. |
88 | /// |
89 | /// The `core` field may be accessed directly but the results buffer may not. |
90 | /// PRNG implementations can simply use a type alias |
91 | /// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a |
92 | /// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must |
93 | /// re-implement `RngCore` but hides the implementation details and allows |
94 | /// extra functionality to be defined on the RNG |
95 | /// (e.g. `impl MyRng { fn set_stream(...){...} }`). |
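///
/// A minimal sketch of the wrapper-type approach (using a toy counter-based
/// `MyRngCore`, hidden in the example, purely for illustration):
///
/// ```
/// use rand_core::RngCore;
/// use rand_core::block::{BlockRng, BlockRngCore};
/// # struct MyRngCore(u32);
/// # impl BlockRngCore for MyRngCore {
/// #     type Item = u32;
/// #     type Results = [u32; 16];
/// #     fn generate(&mut self, results: &mut Self::Results) {
/// #         for r in results {
/// #             self.0 = self.0.wrapping_add(1);
/// #             *r = self.0;
/// #         }
/// #     }
/// # }
/// // Wrapper hiding the `BlockRng` implementation detail.
/// struct MyRng(BlockRng<MyRngCore>);
///
/// // Re-implement `RngCore` by forwarding to the inner `BlockRng`.
/// impl RngCore for MyRng {
///     fn next_u32(&mut self) -> u32 {
///         self.0.next_u32()
///     }
///     fn next_u64(&mut self) -> u64 {
///         self.0.next_u64()
///     }
///     fn fill_bytes(&mut self, dest: &mut [u8]) {
///         self.0.fill_bytes(dest)
///     }
/// }
///
/// let mut rng = MyRng(BlockRng::new(MyRngCore(0)));
/// assert_eq!(rng.next_u32(), 1);
/// ```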
96 | /// |
/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
/// reading values from the results buffer. These methods also handle the
/// bookkeeping of when to generate a new batch of values.
102 | /// |
103 | /// No whole generated `u32` values are thrown away and all values are consumed |
104 | /// in-order. [`next_u32`] simply takes the next available `u32` value. |
105 | /// [`next_u64`] is implemented by combining two `u32` values, least |
/// significant first. [`fill_bytes`] consumes a whole number of `u32` values,
107 | /// converting each `u32` to a byte slice in little-endian order. If the requested byte |
108 | /// length is not a multiple of 4, some bytes will be discarded. |
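///
/// For example, starting from a freshly generated buffer `[v0, v1, v2, ...]`,
/// a call to [`next_u32`] returns `v0`, and a following [`next_u64`] returns
/// `u64::from(v1) | (u64::from(v2) << 32)`.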
109 | /// |
110 | /// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is |
111 | /// no direct support for other buffer types. |
112 | /// |
113 | /// For easy initialization `BlockRng` also implements [`SeedableRng`]. |
114 | /// |
115 | /// [`next_u32`]: RngCore::next_u32 |
116 | /// [`next_u64`]: RngCore::next_u64 |
117 | /// [`fill_bytes`]: RngCore::fill_bytes |
118 | #[derive(Clone)] |
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serde",
122 | serde( |
123 | bound = "for<'x> R: Serialize + Deserialize<'x>, for<'x> R::Results: Serialize + Deserialize<'x>" |
124 | ) |
125 | )] |
126 | pub struct BlockRng<R: BlockRngCore> { |
127 | results: R::Results, |
128 | index: usize, |
129 | /// The *core* part of the RNG, implementing the `generate` function. |
130 | pub core: R, |
131 | } |
132 | |
133 | // Custom Debug implementation that does not expose the contents of `results`. |
134 | impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> { |
135 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { |
        fmt.debug_struct("BlockRng")
            .field("core", &self.core)
            .field("result_len", &self.results.as_ref().len())
            .field("index", &self.index)
140 | .finish() |
141 | } |
142 | } |
143 | |
144 | impl<R: BlockRngCore> BlockRng<R> { |
145 | /// Create a new `BlockRng` from an existing RNG implementing |
146 | /// `BlockRngCore`. Results will be generated on first use. |
    #[inline]
148 | pub fn new(core: R) -> BlockRng<R> { |
149 | let results_empty = R::Results::default(); |
150 | BlockRng { |
151 | core, |
152 | index: results_empty.as_ref().len(), |
153 | results: results_empty, |
154 | } |
155 | } |
156 | |
157 | /// Get the index into the result buffer. |
158 | /// |
159 | /// If this is equal to or larger than the size of the result buffer then |
160 | /// the buffer is "empty" and `generate()` must be called to produce new |
161 | /// results. |
    #[inline(always)]
163 | pub fn index(&self) -> usize { |
164 | self.index |
165 | } |
166 | |
167 | /// Reset the number of available results. |
168 | /// This will force a new set of results to be generated on next use. |
    #[inline]
170 | pub fn reset(&mut self) { |
171 | self.index = self.results.as_ref().len(); |
172 | } |
173 | |
174 | /// Generate a new set of results immediately, setting the index to the |
175 | /// given value. |
    #[inline]
177 | pub fn generate_and_set(&mut self, index: usize) { |
178 | assert!(index < self.results.as_ref().len()); |
179 | self.core.generate(&mut self.results); |
180 | self.index = index; |
181 | } |
182 | } |
183 | |
184 | impl<R: BlockRngCore<Item = u32>> RngCore for BlockRng<R> { |
    #[inline]
186 | fn next_u32(&mut self) -> u32 { |
187 | if self.index >= self.results.as_ref().len() { |
188 | self.generate_and_set(0); |
189 | } |
190 | |
191 | let value = self.results.as_ref()[self.index]; |
192 | self.index += 1; |
193 | value |
194 | } |
195 | |
    #[inline]
197 | fn next_u64(&mut self) -> u64 { |
198 | let read_u64 = |results: &[u32], index| { |
199 | let data = &results[index..=index + 1]; |
200 | u64::from(data[1]) << 32 | u64::from(data[0]) |
201 | }; |
202 | |
203 | let len = self.results.as_ref().len(); |
204 | |
205 | let index = self.index; |
206 | if index < len - 1 { |
207 | self.index += 2; |
208 | // Read an u64 from the current index |
209 | read_u64(self.results.as_ref(), index) |
210 | } else if index >= len { |
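            // Buffer exhausted: generate a new block, then read its first two values.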
211 | self.generate_and_set(2); |
212 | read_u64(self.results.as_ref(), 0) |
213 | } else { |
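            // One value left: use it as the low half and the first value of a
            // fresh block as the high half.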
214 | let x = u64::from(self.results.as_ref()[len - 1]); |
215 | self.generate_and_set(1); |
216 | let y = u64::from(self.results.as_ref()[0]); |
217 | (y << 32) | x |
218 | } |
219 | } |
220 | |
    #[inline]
222 | fn fill_bytes(&mut self, dest: &mut [u8]) { |
223 | let mut read_len = 0; |
224 | while read_len < dest.len() { |
225 | if self.index >= self.results.as_ref().len() { |
226 | self.generate_and_set(0); |
227 | } |
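            // Copy buffered values into `dest` as little-endian bytes; returns
            // the number of `u32` values consumed and bytes written.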
228 | let (consumed_u32, filled_u8) = fill_via_u32_chunks( |
229 | &mut self.results.as_mut()[self.index..], |
230 | &mut dest[read_len..], |
231 | ); |
232 | |
233 | self.index += consumed_u32; |
234 | read_len += filled_u8; |
235 | } |
236 | } |
237 | } |
238 | |
239 | impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> { |
240 | type Seed = R::Seed; |
241 | |
    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        Self::new(R::from_seed(seed))
    }

    #[inline(always)]
    fn seed_from_u64(seed: u64) -> Self {
        Self::new(R::seed_from_u64(seed))
    }

    #[inline(always)]
    fn from_rng(rng: &mut impl RngCore) -> Self {
        Self::new(R::from_rng(rng))
    }

    #[inline(always)]
    fn try_from_rng<S: TryRngCore>(rng: &mut S) -> Result<Self, S::Error> {
        R::try_from_rng(rng).map(Self::new)
    }
260 | } |
261 | } |
262 | |
263 | impl<R: CryptoBlockRng + BlockRngCore<Item = u32>> CryptoRng for BlockRng<R> {} |
264 | |
265 | /// A wrapper type implementing [`RngCore`] for some type implementing |
266 | /// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement |
267 | /// a full RNG from just a `generate` function. |
268 | /// |
269 | /// This is similar to [`BlockRng`], but specialized for algorithms that operate |
270 | /// on `u64` values. |
271 | /// |
272 | /// No whole generated `u64` values are thrown away and all values are consumed |
273 | /// in-order. [`next_u64`] simply takes the next available `u64` value. |
/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving
/// the other half in the buffer. If the next call is to [`next_u32`], that
/// other half is consumed; both [`next_u64`] and [`fill_bytes`] instead
/// discard the remainder of any half-consumed `u64` when called.
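///
/// For example, starting from a freshly generated buffer `[w0, w1, ...]`, a
/// first call to [`next_u32`] returns the low 32 bits of `w0` and a second
/// call returns its high 32 bits; if [`next_u64`] is called after only the
/// first of these, the unused high half of `w0` is discarded and `w1` is
/// returned.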
278 | /// |
279 | /// [`fill_bytes`] consumes a whole number of `u64` values. If the requested length |
280 | /// is not a multiple of 8, some bytes will be discarded. |
281 | /// |
282 | /// [`next_u32`]: RngCore::next_u32 |
283 | /// [`next_u64`]: RngCore::next_u64 |
284 | /// [`fill_bytes`]: RngCore::fill_bytes |
285 | #[derive(Clone)] |
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
287 | pub struct BlockRng64<R: BlockRngCore + ?Sized> { |
288 | results: R::Results, |
289 | index: usize, |
290 | half_used: bool, // true if only half of the previous result is used |
291 | /// The *core* part of the RNG, implementing the `generate` function. |
292 | pub core: R, |
293 | } |
294 | |
295 | // Custom Debug implementation that does not expose the contents of `results`. |
296 | impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> { |
297 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { |
        fmt.debug_struct("BlockRng64")
            .field("core", &self.core)
            .field("result_len", &self.results.as_ref().len())
            .field("index", &self.index)
            .field("half_used", &self.half_used)
303 | .finish() |
304 | } |
305 | } |
306 | |
307 | impl<R: BlockRngCore> BlockRng64<R> { |
    /// Create a new `BlockRng64` from an existing RNG implementing
309 | /// `BlockRngCore`. Results will be generated on first use. |
    #[inline]
311 | pub fn new(core: R) -> BlockRng64<R> { |
312 | let results_empty = R::Results::default(); |
313 | BlockRng64 { |
314 | core, |
315 | index: results_empty.as_ref().len(), |
316 | half_used: false, |
317 | results: results_empty, |
318 | } |
319 | } |
320 | |
321 | /// Get the index into the result buffer. |
322 | /// |
323 | /// If this is equal to or larger than the size of the result buffer then |
324 | /// the buffer is "empty" and `generate()` must be called to produce new |
325 | /// results. |
    #[inline(always)]
327 | pub fn index(&self) -> usize { |
328 | self.index |
329 | } |
330 | |
331 | /// Reset the number of available results. |
332 | /// This will force a new set of results to be generated on next use. |
    #[inline]
334 | pub fn reset(&mut self) { |
335 | self.index = self.results.as_ref().len(); |
336 | self.half_used = false; |
337 | } |
338 | |
339 | /// Generate a new set of results immediately, setting the index to the |
340 | /// given value. |
    #[inline]
342 | pub fn generate_and_set(&mut self, index: usize) { |
343 | assert!(index < self.results.as_ref().len()); |
344 | self.core.generate(&mut self.results); |
345 | self.index = index; |
346 | self.half_used = false; |
347 | } |
348 | } |
349 | |
350 | impl<R: BlockRngCore<Item = u64>> RngCore for BlockRng64<R> { |
    #[inline]
352 | fn next_u32(&mut self) -> u32 { |
353 | let mut index = self.index - self.half_used as usize; |
354 | if index >= self.results.as_ref().len() { |
355 | self.core.generate(&mut self.results); |
356 | self.index = 0; |
357 | index = 0; |
358 | // `self.half_used` is by definition `false` |
359 | self.half_used = false; |
360 | } |
361 | |
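        // Shift by 0 for the low half of the current value, or by 32 for the high half.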
362 | let shift = 32 * (self.half_used as usize); |
363 | |
364 | self.half_used = !self.half_used; |
365 | self.index += self.half_used as usize; |
366 | |
367 | (self.results.as_ref()[index] >> shift) as u32 |
368 | } |
369 | |
    #[inline]
371 | fn next_u64(&mut self) -> u64 { |
372 | if self.index >= self.results.as_ref().len() { |
373 | self.core.generate(&mut self.results); |
374 | self.index = 0; |
375 | } |
376 | |
377 | let value = self.results.as_ref()[self.index]; |
378 | self.index += 1; |
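        // Any unused half of the previous value is discarded.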
379 | self.half_used = false; |
380 | value |
381 | } |
382 | |
    #[inline]
384 | fn fill_bytes(&mut self, dest: &mut [u8]) { |
385 | let mut read_len = 0; |
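        // Discard any half-consumed value left over from a previous `next_u32` call.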
386 | self.half_used = false; |
387 | while read_len < dest.len() { |
388 | if self.index >= self.results.as_ref().len() { |
389 | self.core.generate(&mut self.results); |
390 | self.index = 0; |
391 | } |
392 | |
393 | let (consumed_u64, filled_u8) = fill_via_u64_chunks( |
394 | &mut self.results.as_mut()[self.index..], |
395 | &mut dest[read_len..], |
396 | ); |
397 | |
398 | self.index += consumed_u64; |
399 | read_len += filled_u8; |
400 | } |
401 | } |
402 | } |
403 | |
404 | impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> { |
405 | type Seed = R::Seed; |
406 | |
    #[inline(always)]
    fn from_seed(seed: Self::Seed) -> Self {
        Self::new(R::from_seed(seed))
    }

    #[inline(always)]
    fn seed_from_u64(seed: u64) -> Self {
        Self::new(R::seed_from_u64(seed))
    }

    #[inline(always)]
    fn from_rng(rng: &mut impl RngCore) -> Self {
        Self::new(R::from_rng(rng))
    }

    #[inline(always)]
    fn try_from_rng<S: TryRngCore>(rng: &mut S) -> Result<Self, S::Error> {
        R::try_from_rng(rng).map(Self::new)
    }
425 | } |
426 | } |
427 | |
428 | impl<R: CryptoBlockRng + BlockRngCore<Item = u64>> CryptoRng for BlockRng64<R> {} |
429 | |
#[cfg(test)]
431 | mod test { |
432 | use crate::block::{BlockRng, BlockRng64, BlockRngCore}; |
433 | use crate::{RngCore, SeedableRng}; |
434 | |
435 | #[derive(Debug, Clone)] |
436 | struct DummyRng { |
437 | counter: u32, |
438 | } |
439 | |
440 | impl BlockRngCore for DummyRng { |
441 | type Item = u32; |
442 | type Results = [u32; 16]; |
443 | |
444 | fn generate(&mut self, results: &mut Self::Results) { |
445 | for r in results { |
446 | *r = self.counter; |
447 | self.counter = self.counter.wrapping_add(3511615421); |
448 | } |
449 | } |
450 | } |
451 | |
452 | impl SeedableRng for DummyRng { |
453 | type Seed = [u8; 4]; |
454 | |
455 | fn from_seed(seed: Self::Seed) -> Self { |
456 | DummyRng { |
457 | counter: u32::from_le_bytes(seed), |
458 | } |
459 | } |
460 | } |
461 | |
462 | #[test] |
463 | fn blockrng_next_u32_vs_next_u64() { |
464 | let mut rng1 = BlockRng::<DummyRng>::from_seed([1, 2, 3, 4]); |
465 | let mut rng2 = rng1.clone(); |
466 | let mut rng3 = rng1.clone(); |
467 | |
468 | let mut a = [0; 16]; |
469 | a[..4].copy_from_slice(&rng1.next_u32().to_le_bytes()); |
470 | a[4..12].copy_from_slice(&rng1.next_u64().to_le_bytes()); |
471 | a[12..].copy_from_slice(&rng1.next_u32().to_le_bytes()); |
472 | |
473 | let mut b = [0; 16]; |
474 | b[..4].copy_from_slice(&rng2.next_u32().to_le_bytes()); |
475 | b[4..8].copy_from_slice(&rng2.next_u32().to_le_bytes()); |
476 | b[8..].copy_from_slice(&rng2.next_u64().to_le_bytes()); |
477 | assert_eq!(a, b); |
478 | |
479 | let mut c = [0; 16]; |
480 | c[..8].copy_from_slice(&rng3.next_u64().to_le_bytes()); |
481 | c[8..12].copy_from_slice(&rng3.next_u32().to_le_bytes()); |
482 | c[12..].copy_from_slice(&rng3.next_u32().to_le_bytes()); |
483 | assert_eq!(a, c); |
484 | } |
485 | |
486 | #[derive(Debug, Clone)] |
487 | struct DummyRng64 { |
488 | counter: u64, |
489 | } |
490 | |
491 | impl BlockRngCore for DummyRng64 { |
492 | type Item = u64; |
493 | type Results = [u64; 8]; |
494 | |
495 | fn generate(&mut self, results: &mut Self::Results) { |
496 | for r in results { |
497 | *r = self.counter; |
498 | self.counter = self.counter.wrapping_add(2781463553396133981); |
499 | } |
500 | } |
501 | } |
502 | |
503 | impl SeedableRng for DummyRng64 { |
504 | type Seed = [u8; 8]; |
505 | |
506 | fn from_seed(seed: Self::Seed) -> Self { |
507 | DummyRng64 { |
508 | counter: u64::from_le_bytes(seed), |
509 | } |
510 | } |
511 | } |
512 | |
513 | #[test] |
514 | fn blockrng64_next_u32_vs_next_u64() { |
515 | let mut rng1 = BlockRng64::<DummyRng64>::from_seed([1, 2, 3, 4, 5, 6, 7, 8]); |
516 | let mut rng2 = rng1.clone(); |
517 | let mut rng3 = rng1.clone(); |
518 | |
519 | let mut a = [0; 16]; |
520 | a[..4].copy_from_slice(&rng1.next_u32().to_le_bytes()); |
521 | a[4..12].copy_from_slice(&rng1.next_u64().to_le_bytes()); |
522 | a[12..].copy_from_slice(&rng1.next_u32().to_le_bytes()); |
523 | |
524 | let mut b = [0; 16]; |
525 | b[..4].copy_from_slice(&rng2.next_u32().to_le_bytes()); |
526 | b[4..8].copy_from_slice(&rng2.next_u32().to_le_bytes()); |
527 | b[8..].copy_from_slice(&rng2.next_u64().to_le_bytes()); |
528 | assert_ne!(a, b); |
529 | assert_eq!(&a[..4], &b[..4]); |
530 | assert_eq!(&a[4..12], &b[8..]); |
531 | |
532 | let mut c = [0; 16]; |
533 | c[..8].copy_from_slice(&rng3.next_u64().to_le_bytes()); |
534 | c[8..12].copy_from_slice(&rng3.next_u32().to_le_bytes()); |
535 | c[12..].copy_from_slice(&rng3.next_u32().to_le_bytes()); |
536 | assert_eq!(b, c); |
537 | } |
538 | } |
539 | |