// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.

use crate::RngCore;

/// Implement `next_u64` via `next_u32`, little-endian order.
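///
/// # Example
///
/// A sketch of the composition order, assuming a hypothetical counter RNG
/// whose `next_u32` yields 1, then 2 (e.g. `StepRng` from the `rand` crate):
///
/// ```ignore
/// let mut rng = StepRng::new(1, 1); // hypothetical: yields 1, 2, 3, ...
/// assert_eq!(next_u64_via_u32(&mut rng), (2u64 << 32) | 1);
/// ```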
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
    let x = u64::from(rng.next_u32());
    let y = u64::from(rng.next_u32());
    (y << 32) | x
}

/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
///
/// The fastest way to fill a slice is usually to work as long as possible with
/// integers. That is why this method mostly uses `next_u64`, falling back to a
/// single `next_u32` call only when four or fewer bytes remain at the end of
/// the slice.
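///
/// # Example
///
/// A sketch of the resulting byte order, assuming a hypothetical RNG whose
/// `next_u64` always returns `0x0807_0605_0403_0201` and whose `next_u32`
/// returns the low half, `0x0403_0201`:
///
/// ```ignore
/// let mut buf = [0u8; 11];
/// fill_bytes_via_next(&mut rng, &mut buf);
/// // One full `next_u64` chunk, then 3 bytes from a single `next_u32`.
/// assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3]);
/// ```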
pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
    let mut left: &mut [u8] = dest;
    while left.len() >= 8 {
        // `{ left }` moves the reference so that `left` can be reassigned.
        let (l, r) = { left }.split_at_mut(8);
        left = r;
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        l.copy_from_slice(&chunk);
    }
    let n = left.len();
    if n > 4 {
        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    } else if n > 0 {
        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
        left.copy_from_slice(&chunk[..n]);
    }
}

/// Helper trait: an integer type whose little-endian byte representation can
/// be observed as a byte slice.
pub(crate) trait Observable: Copy {
    type Bytes: Sized + AsRef<[u8]>;
    fn to_le_bytes(self) -> Self::Bytes;
}
impl Observable for u32 {
    type Bytes = [u8; 4];

    fn to_le_bytes(self) -> Self::Bytes {
        // Resolves to the inherent `u32::to_le_bytes`, not this trait method.
        Self::to_le_bytes(self)
    }
}
impl Observable for u64 {
    type Bytes = [u8; 8];

    fn to_le_bytes(self) -> Self::Bytes {
        // Resolves to the inherent `u64::to_le_bytes`, not this trait method.
        Self::to_le_bytes(self)
    }
}

/// Fill `dest` from `src`.
///
/// Returns `(n, byte_len)`. `src[..n]` is consumed and
/// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left
/// unaltered.
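///
/// For example, with `u32` words, `src = [1, 2, 3]` and a 5-byte `dest`:
/// the first word fills `dest[..4]`, the second is consumed but only its
/// least-significant byte fits, so the result is `(2, 5)` (see the tests at
/// the end of this file).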
pub(crate) fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
    let size = core::mem::size_of::<T>();

    // Always use little endian for portability of results.

    let mut dest = dest.chunks_exact_mut(size);
    let mut src = src.iter();

    let zipped = dest.by_ref().zip(src.by_ref());
    let num_chunks: usize = zipped.len();
    zipped.for_each(|(dest, src)| dest.copy_from_slice(src.to_le_bytes().as_ref()));

    let byte_len: usize = num_chunks * size;
    if let Some(src) = src.next() {
        // We have consumed all full chunks of dest, but not src.
        let dest = dest.into_remainder();
        let n = dest.len();
        if n > 0 {
            dest.copy_from_slice(&src.to_le_bytes().as_ref()[..n]);
            return (num_chunks + 1, byte_len + n);
        }
    }
    (num_chunks, byte_len)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u32, filled_u8)`.
///
/// `src` is not modified; it is taken as a `&mut` reference for backward
/// compatibility with previous versions that did change it.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u32` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 4` rounded up.
///
/// # Example
/// (from `IsaacRng`)
///
/// ```ignore
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
///     let mut read_len = 0;
///     while read_len < dest.len() {
///         if self.index >= self.rsl.len() {
///             self.isaac();
///         }
///
///         let (consumed_u32, filled_u8) =
///             impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
///                                        &mut dest[read_len..]);
///
///         self.index += consumed_u32;
///         read_len += filled_u8;
///     }
/// }
/// ```
#[deprecated(since = "0.9.3", note = "use BlockRng instead")]
pub fn fill_via_u32_chunks(src: &mut [u32], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks(src, dest)
}

/// Implement `fill_bytes` by reading chunks from the output buffer of a block
/// based RNG.
///
/// The return values are `(consumed_u64, filled_u8)`.
///
/// `src` is not modified; it is taken as a `&mut` reference for backward
/// compatibility with previous versions that did change it.
///
/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
/// the length of `dest`.
/// `consumed_u64` is the number of words consumed from `src`, which is the same
/// as `filled_u8 / 8` rounded up.
///
/// See `fill_via_u32_chunks` for an example.
#[deprecated(since = "0.9.3", note = "use BlockRng64 instead")]
pub fn fill_via_u64_chunks(src: &mut [u64], dest: &mut [u8]) -> (usize, usize) {
    fill_via_chunks(src, dest)
}

/// Implement `next_u32` via `fill_bytes`, little-endian order.
pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
    let mut buf = [0u8; 4];
    rng.fill_bytes(&mut buf);
    u32::from_le_bytes(buf)
}

/// Implement `next_u64` via `fill_bytes`, little-endian order.
pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    let mut buf = [0u8; 8];
    rng.fill_bytes(&mut buf);
    u64::from_le_bytes(buf)
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let src_orig = [1u32, 2, 3];

        let src = src_orig;
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_chunks(&src, &mut dst), (3, 11));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        let src = src_orig;
        let mut dst = [0u8; 13];
        assert_eq!(fill_via_chunks(&src, &mut dst), (3, 12));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        let src = src_orig;
        let mut dst = [0u8; 5];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 5));
        assert_eq!(dst, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let src_orig = [1u64, 2];

        let src = src_orig;
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 11));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        let src = src_orig;
        let mut dst = [0u8; 17];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 16));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        let src = src_orig;
        let mut dst = [0u8; 5];
        assert_eq!(fill_via_chunks(&src, &mut dst), (1, 5));
        assert_eq!(dst, [1, 0, 0, 0, 0]);
    }
}
| 218 | |