// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| 8 | |
//! Helper functions for implementing `RngCore` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
| 19 | |
use crate::RngCore;
use core::cmp::min;
use zerocopy::{Immutable, IntoBytes};
| 23 | |
| 24 | /// Implement `next_u64` via `next_u32`, little-endian order. |
| 25 | pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 { |
| 26 | // Use LE; we explicitly generate one value before the next. |
| 27 | let x: u64 = u64::from(rng.next_u32()); |
| 28 | let y: u64 = u64::from(rng.next_u32()); |
| 29 | (y << 32) | x |
| 30 | } |
| 31 | |
| 32 | /// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order. |
| 33 | /// |
| 34 | /// The fastest way to fill a slice is usually to work as long as possible with |
| 35 | /// integers. That is why this method mostly uses `next_u64`, and only when |
| 36 | /// there are 4 or less bytes remaining at the end of the slice it uses |
| 37 | /// `next_u32` once. |
| 38 | pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) { |
| 39 | let mut left: &mut [u8] = dest; |
| 40 | while left.len() >= 8 { |
| 41 | let (l: &mut [u8], r: &mut [u8]) = { left }.split_at_mut(mid:8); |
| 42 | left = r; |
| 43 | let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); |
| 44 | l.copy_from_slice(&chunk); |
| 45 | } |
| 46 | let n: usize = left.len(); |
| 47 | if n > 4 { |
| 48 | let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); |
| 49 | left.copy_from_slice(&chunk[..n]); |
| 50 | } else if n > 0 { |
| 51 | let chunk: [u8; 4] = rng.next_u32().to_le_bytes(); |
| 52 | left.copy_from_slice(&chunk[..n]); |
| 53 | } |
| 54 | } |
| 55 | |
| 56 | trait Observable: IntoBytes + Immutable + Copy { |
| 57 | fn to_le(self) -> Self; |
| 58 | } |
| 59 | impl Observable for u32 { |
| 60 | fn to_le(self) -> Self { |
| 61 | self.to_le() |
| 62 | } |
| 63 | } |
| 64 | impl Observable for u64 { |
| 65 | fn to_le(self) -> Self { |
| 66 | self.to_le() |
| 67 | } |
| 68 | } |
| 69 | |
| 70 | /// Fill dest from src |
| 71 | /// |
| 72 | /// Returns `(n, byte_len)`. `src[..n]` is consumed (and possibly mutated), |
| 73 | /// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left |
| 74 | /// unaltered. |
| 75 | fn fill_via_chunks<T: Observable>(src: &mut [T], dest: &mut [u8]) -> (usize, usize) { |
| 76 | let size: usize = core::mem::size_of::<T>(); |
| 77 | let byte_len: usize = min(v1:core::mem::size_of_val(src), v2:dest.len()); |
| 78 | let num_chunks: usize = (byte_len + size - 1) / size; |
| 79 | |
| 80 | // Byte-swap for portability of results. This must happen before copying |
| 81 | // since the size of dest is not guaranteed to be a multiple of T or to be |
| 82 | // sufficiently aligned. |
| 83 | if cfg!(target_endian = "big" ) { |
| 84 | for x: &mut T in &mut src[..num_chunks] { |
| 85 | *x = x.to_le(); |
| 86 | } |
| 87 | } |
| 88 | |
| 89 | dest[..byte_len].copy_from_slice(&<[T]>::as_bytes(&src[..num_chunks])[..byte_len]); |
| 90 | |
| 91 | (num_chunks, byte_len) |
| 92 | } |
| 93 | |
| 94 | /// Implement `fill_bytes` by reading chunks from the output buffer of a block |
| 95 | /// based RNG. |
| 96 | /// |
| 97 | /// The return values are `(consumed_u32, filled_u8)`. |
| 98 | /// |
| 99 | /// On big-endian systems, endianness of `src[..consumed_u32]` values is |
| 100 | /// swapped. No other adjustments to `src` are made. |
| 101 | /// |
| 102 | /// `filled_u8` is the number of filled bytes in `dest`, which may be less than |
| 103 | /// the length of `dest`. |
| 104 | /// `consumed_u32` is the number of words consumed from `src`, which is the same |
| 105 | /// as `filled_u8 / 4` rounded up. |
| 106 | /// |
| 107 | /// # Example |
| 108 | /// (from `IsaacRng`) |
| 109 | /// |
| 110 | /// ```ignore |
| 111 | /// fn fill_bytes(&mut self, dest: &mut [u8]) { |
| 112 | /// let mut read_len = 0; |
| 113 | /// while read_len < dest.len() { |
| 114 | /// if self.index >= self.rsl.len() { |
| 115 | /// self.isaac(); |
| 116 | /// } |
| 117 | /// |
| 118 | /// let (consumed_u32, filled_u8) = |
| 119 | /// impls::fill_via_u32_chunks(&mut self.rsl[self.index..], |
| 120 | /// &mut dest[read_len..]); |
| 121 | /// |
| 122 | /// self.index += consumed_u32; |
| 123 | /// read_len += filled_u8; |
| 124 | /// } |
| 125 | /// } |
| 126 | /// ``` |
| 127 | pub fn fill_via_u32_chunks(src: &mut [u32], dest: &mut [u8]) -> (usize, usize) { |
| 128 | fill_via_chunks(src, dest) |
| 129 | } |
| 130 | |
| 131 | /// Implement `fill_bytes` by reading chunks from the output buffer of a block |
| 132 | /// based RNG. |
| 133 | /// |
| 134 | /// The return values are `(consumed_u64, filled_u8)`. |
| 135 | /// |
| 136 | /// On big-endian systems, endianness of `src[..consumed_u64]` values is |
| 137 | /// swapped. No other adjustments to `src` are made. |
| 138 | /// |
| 139 | /// `filled_u8` is the number of filled bytes in `dest`, which may be less than |
| 140 | /// the length of `dest`. |
| 141 | /// `consumed_u64` is the number of words consumed from `src`, which is the same |
| 142 | /// as `filled_u8 / 8` rounded up. |
| 143 | /// |
| 144 | /// See `fill_via_u32_chunks` for an example. |
| 145 | pub fn fill_via_u64_chunks(src: &mut [u64], dest: &mut [u8]) -> (usize, usize) { |
| 146 | fill_via_chunks(src, dest) |
| 147 | } |
| 148 | |
| 149 | /// Implement `next_u32` via `fill_bytes`, little-endian order. |
| 150 | pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 { |
| 151 | let mut buf: [u8; 4] = [0; 4]; |
| 152 | rng.fill_bytes(&mut buf); |
| 153 | u32::from_le_bytes(buf) |
| 154 | } |
| 155 | |
| 156 | /// Implement `next_u64` via `fill_bytes`, little-endian order. |
| 157 | pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 { |
| 158 | let mut buf: [u8; 8] = [0; 8]; |
| 159 | rng.fill_bytes(&mut buf); |
| 160 | u64::from_le_bytes(buf) |
| 161 | } |
| 162 | |
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let words = [1u32, 2, 3];

        // dest shorter than source: all 3 words consumed, last one truncated.
        let mut src = words;
        let mut dest = [0u8; 11];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (3, 11));
        assert_eq!(dest, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        // dest longer than source: only 12 bytes filled, tail untouched.
        let mut src = words;
        let mut dest = [0u8; 13];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (3, 12));
        assert_eq!(dest, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        // dest mid-word: 2 words consumed for 5 bytes.
        let mut src = words;
        let mut dest = [0u8; 5];
        assert_eq!(fill_via_u32_chunks(&mut src, &mut dest), (2, 5));
        assert_eq!(dest, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let words = [1u64, 2];

        // dest shorter than source: both words consumed, second truncated.
        let mut src = words;
        let mut dest = [0u8; 11];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (2, 11));
        assert_eq!(dest, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        // dest longer than source: only 16 bytes filled, tail untouched.
        let mut src = words;
        let mut dest = [0u8; 17];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (2, 16));
        assert_eq!(dest, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        // dest mid-word: 1 word consumed for 5 bytes.
        let mut src = words;
        let mut dest = [0u8; 5];
        assert_eq!(fill_via_u64_chunks(&mut src, &mut dest), (1, 5));
        assert_eq!(dest, [1, 0, 0, 0, 0]);
    }
}
| 207 | |