// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A wrapper around another PRNG that reseeds it after it
//! generates a certain number of random bytes.

use core::mem::size_of;

use rand_core::block::{BlockRng, BlockRngCore};
use rand_core::{CryptoRng, Error, RngCore, SeedableRng};

/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
/// ability to reseed it.
///
/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
///
/// - On a manual call to [`reseed()`].
/// - After `clone()`, the clone will be reseeded on first use.
/// - When a process is forked on UNIX, the RNGs in both the parent and child
///   processes will be reseeded just before the next call to
///   [`BlockRngCore::generate`], i.e. "soon". For ChaCha and Hc128 this is a
///   maximum of fifteen `u32` values before reseeding.
/// - After the PRNG has generated a configurable number of random bytes.
///
/// # When should reseeding after a fixed number of generated bytes be used?
///
/// Reseeding after a fixed number of generated bytes is never strictly
/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
/// can output, or at least not a limit reachable in any practical way. There is
/// no such thing as 'running out of entropy'.
///
/// Occasional reseeding can be seen as a form of 'security in depth'. Even if
/// a cryptographic weakness is found in the CSPRNG being used, or a flaw in
/// its implementation, occasional reseeding should make exploiting it much
/// more difficult or even impossible.
///
/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
/// after a fixed number of generated bytes.
///
/// # Limitations
///
/// It is recommended that a `ReseedingRng` (including `ThreadRng`) not be used
/// from a fork handler.
/// Use `OsRng` or `getrandom`, or defer your use of the RNG until later.
///
/// # Error handling
///
/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
/// never panic in that case, but tries to handle the error intelligently
/// through some combination of retrying and delaying the reseed until later.
/// If handling the source error fails, `ReseedingRng` will continue generating
/// data from the wrapped PRNG without reseeding.
///
/// A manual call to [`reseed()`] does not use this retry or delay logic; it
/// reports the error to the caller instead.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
///                                // implements BlockRngCore
/// use rand::rngs::OsRng;
/// use rand::rngs::adapter::ReseedingRng;
///
/// let prng = ChaCha20Core::from_entropy();
/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng);
///
/// println!("{}", reseeding_rng.gen::<u64>());
///
/// let mut cloned_rng = reseeding_rng.clone();
/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
/// ```
///
/// [`BlockRngCore`]: rand_core::block::BlockRngCore
/// [`ReseedingRng::new`]: ReseedingRng::new
/// [`reseed()`]: ReseedingRng::reseed
#[derive(Debug)]
pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore;

impl<R, Rsdr> ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore,
{
    /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
    /// to use as reseeder.
    ///
    /// `threshold` sets the number of generated bytes after which to reseed the
    /// PRNG. Set it to zero to never reseed based on the number of generated
    /// values.
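    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the type-level example above (and assuming,
    /// as there, that the `rand_chacha` crate is available): reseed from
    /// `OsRng` after roughly 64 KiB of generated data.
    ///
    /// ```
    /// use rand::prelude::*;
    /// use rand::rngs::OsRng;
    /// use rand::rngs::adapter::ReseedingRng;
    /// use rand_chacha::ChaCha20Core;
    ///
    /// // ChaCha20Core implements BlockRngCore; reseed after 64 KiB of output.
    /// let prng = ChaCha20Core::from_entropy();
    /// let mut rng = ReseedingRng::new(prng, 1024 * 64, OsRng);
    /// println!("{}", rng.gen::<u64>());
    /// ```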
    pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
        ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
    }

    /// Reseed the internal PRNG.
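    ///
    /// Unlike automatic reseeding, a failure here is reported to the caller
    /// rather than retried or delayed. A minimal sketch (assuming, as in the
    /// type-level example, that the `rand_chacha` crate is available):
    ///
    /// ```
    /// use rand::prelude::*;
    /// use rand::rngs::OsRng;
    /// use rand::rngs::adapter::ReseedingRng;
    /// use rand_chacha::ChaCha20Core;
    ///
    /// let mut rng = ReseedingRng::new(ChaCha20Core::from_entropy(), 0, OsRng);
    /// // Force a reseed right now; any error from OsRng is returned here.
    /// rng.reseed().expect("reseeding from OsRng failed");
    /// ```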
    pub fn reseed(&mut self) -> Result<(), Error> {
        self.0.core.reseed()
    }
}

// TODO: this should be implemented for any type where the inner type
// implements RngCore, but we can't specify that because ReseedingCore is private
impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore<Item = u32> + SeedableRng,
    <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
{
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        self.0.next_u32()
    }

    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        self.0.fill_bytes(dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.0.try_fill_bytes(dest)
    }
}

impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + Clone,
    Rsdr: RngCore + Clone,
{
    fn clone(&self) -> ReseedingRng<R, Rsdr> {
        // Recreating `BlockRng` seems easier than cloning it and resetting
        // the index.
        ReseedingRng(BlockRng::new(self.0.core.clone()))
    }
}

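// `ReseedingRng` is only as cryptographically secure as its parts: the marker
// trait is implemented only when both the wrapped PRNG and the reseeder
// implement `CryptoRng`.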
impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + CryptoRng,
    Rsdr: RngCore + CryptoRng,
{
}

#[derive(Debug)]
struct ReseedingCore<R, Rsdr> {
    inner: R,
    reseeder: Rsdr,
    threshold: i64,
    bytes_until_reseed: i64,
    fork_counter: usize,
}

impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore,
{
    type Item = <R as BlockRngCore>::Item;
    type Results = <R as BlockRngCore>::Results;

    fn generate(&mut self, results: &mut Self::Results) {
        let global_fork_counter = fork::get_fork_counter();
        if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) {
            // Reseeding and generating from a separate, non-inlined function
            // and returning directly performs better than calling `reseed`
            // here and continuing with the rest of this function.
            return self.reseed_and_generate(results, global_fork_counter);
        }
        let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
        self.bytes_until_reseed -= num_bytes as i64;
        self.inner.generate(results);
    }
}

impl<R, Rsdr> ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng,
    Rsdr: RngCore,
{
    /// Create a new `ReseedingCore`.
    fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
        use ::core::i64::MAX;
        fork::register_fork_handler();

        // Because generating more values than `i64::MAX` takes centuries on
        // current hardware, we just clamp to that value.
        // A `threshold` of 0, which means "never reseed based on the number
        // of generated bytes", is also mapped to that value.
        let threshold = if threshold == 0 {
            MAX
        } else if threshold <= MAX as u64 {
            threshold as i64
        } else {
            MAX
        };

        ReseedingCore {
            inner: rng,
            reseeder,
            threshold,
            bytes_until_reseed: threshold,
            fork_counter: 0,
        }
    }

    /// Reseed the internal PRNG.
    fn reseed(&mut self) -> Result<(), Error> {
        R::from_rng(&mut self.reseeder).map(|result| {
            self.bytes_until_reseed = self.threshold;
            self.inner = result
        })
    }

    fn is_forked(&self, global_fork_counter: usize) -> bool {
        // In theory, on 32-bit platforms, it is possible for
        // `global_fork_counter` to wrap around after ~4e9 forks.
        //
        // This check will detect a fork in the normal case where
        // `fork_counter < global_fork_counter`, and also when the difference
        // between both is greater than `isize::MAX` (wrapped around).
        //
        // It will still fail to detect a fork if there have been more than
        // `isize::MAX` forks, without any reseed in between. Seems unlikely
        // enough.
        (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
    }

    #[inline(never)]
    fn reseed_and_generate(
        &mut self, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize,
    ) {
        #![allow(clippy::if_same_then_else)] // false positive
        if self.is_forked(global_fork_counter) {
            info!("Fork detected, reseeding RNG");
        } else {
            trace!("Reseeding RNG (periodic reseed)");
        }

        let num_bytes = results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();

        if let Err(e) = self.reseed() {
            warn!("Reseeding RNG failed: {}", e);
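            // Keep `e` "used" so this compiles cleanly even when the `log`
            // feature is disabled and `warn!` expands to nothing.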
            let _ = e;
        }
        self.fork_counter = global_fork_counter;

        self.bytes_until_reseed = self.threshold - num_bytes as i64;
        self.inner.generate(results);
    }
}

impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + Clone,
    Rsdr: RngCore + Clone,
{
    fn clone(&self) -> ReseedingCore<R, Rsdr> {
        ReseedingCore {
            inner: self.inner.clone(),
            reseeder: self.reseeder.clone(),
            threshold: self.threshold,
            bytes_until_reseed: 0, // reseed clone on first use
            fork_counter: self.fork_counter,
        }
    }
}

impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
where
    R: BlockRngCore + SeedableRng + CryptoRng,
    Rsdr: RngCore + CryptoRng,
{
}


#[cfg(all(unix, not(target_os = "emscripten")))]
mod fork {
    use core::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Once;

    // Fork protection
    //
    // We implement fork protection on Unix using `pthread_atfork`.
    // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
    // Every `ReseedingRng` stores the last known value of the static in
    // `fork_counter`. If the cached `fork_counter` is less than
    // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
    //
    // If reseeding fails, we don't deal with this by setting a delay, but just
    // don't update `fork_counter`, so a reseed is attempted as soon as
    // possible.

    static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0);

    pub fn get_fork_counter() -> usize {
        RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
    }

    extern "C" fn fork_handler() {
        // Note: fetch_add is defined to wrap on overflow
        // (which is what we want).
        RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
    }

    pub fn register_fork_handler() {
        static REGISTER: Once = Once::new();
        REGISTER.call_once(|| {
            // Bump the counter before and after forking (see #1169):
            let ret = unsafe {
                libc::pthread_atfork(
                    Some(fork_handler),
                    Some(fork_handler),
                    Some(fork_handler),
                )
            };
            if ret != 0 {
                panic!("libc::pthread_atfork failed with code {}", ret);
            }
        });
    }
}

#[cfg(not(all(unix, not(target_os = "emscripten"))))]
mod fork {
    pub fn get_fork_counter() -> usize {
        0
    }
    pub fn register_fork_handler() {}
}


#[cfg(feature = "std_rng")]
#[cfg(test)]
mod test {
    use super::ReseedingRng;
    use crate::rngs::mock::StepRng;
    use crate::rngs::std::Core;
    use crate::{Rng, SeedableRng};

    #[test]
    fn test_reseeding() {
        let mut zero = StepRng::new(0, 0);
        let rng = Core::from_rng(&mut zero).unwrap();
        let thresh = 1; // reseed every time the buffer is exhausted
        let mut reseeding = ReseedingRng::new(rng, thresh, zero);

        // RNG buffer size is [u32; 64]
        // Debug is only implemented up to length 32 so use two arrays
        let mut buf = ([0u32; 32], [0u32; 32]);
        reseeding.fill(&mut buf.0);
        reseeding.fill(&mut buf.1);
        let seq = buf;
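
        // With a threshold of 1, every exhausted buffer triggers a reseed, and
        // the all-zero `StepRng` reseeder always restores the same state, so
        // the output sequence should repeat indefinitely.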
        for _ in 0..10 {
            reseeding.fill(&mut buf.0);
            reseeding.fill(&mut buf.1);
            assert_eq!(buf, seq);
        }
    }

    #[test]
    fn test_clone_reseeding() {
        #![allow(clippy::redundant_clone)]

        let mut zero = StepRng::new(0, 0);
        let rng = Core::from_rng(&mut zero).unwrap();
        let mut rng1 = ReseedingRng::new(rng, 32 * 4, zero);

        let first: u32 = rng1.gen();
        for _ in 0..10 {
            let _ = rng1.gen::<u32>();
        }

        let mut rng2 = rng1.clone();
        assert_eq!(first, rng2.gen::<u32>());
    }
}