1 | // Copyright 2018 Developers of the Rand project. |
2 | // |
3 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
4 | // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
5 | // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your |
6 | // option. This file may not be copied, modified, or distributed |
7 | // except according to those terms. |
8 | |
/// Initialize an RNG from a `u64` seed using `SplitMix64`.
10 | macro_rules! from_splitmix { |
11 | ($seed:expr) => { { |
12 | let mut rng = crate::SplitMix64::seed_from_u64($seed); |
13 | Self::from_rng(&mut rng).unwrap() |
14 | } } |
15 | } |
16 | |
17 | /// Apply the ** scrambler used by some RNGs from the xoshiro family. |
18 | macro_rules! starstar_u64 { |
19 | ($x:expr) => { |
20 | $x.wrapping_mul(5).rotate_left(7).wrapping_mul(9) |
21 | } |
22 | } |
23 | |
24 | /// Apply the ** scrambler used by some RNGs from the xoshiro family. |
25 | macro_rules! starstar_u32 { |
26 | ($x:expr) => { |
27 | $x.wrapping_mul(0x9E3779BB).rotate_left(5).wrapping_mul(5) |
28 | } |
29 | } |
30 | |
31 | /// Apply the ++ scrambler used by some RNGs from the xoshiro family. |
32 | macro_rules! plusplus_u64 { |
33 | ($x:expr, $y:expr, $rot:expr) => { |
34 | $x.wrapping_add($y).rotate_left($rot).wrapping_add($x) |
35 | } |
36 | } |
37 | |
38 | /// Apply the ++ scrambler used by some RNGs from the xoshiro family. |
39 | macro_rules! plusplus_u32 { |
40 | ($x:expr, $y:expr) => { |
41 | $x.wrapping_add($y).rotate_left(7).wrapping_add($x) |
42 | } |
43 | } |
44 | |
45 | /// Implement a jump function for an RNG from the xoshiro family. |
46 | macro_rules! impl_jump { |
47 | (u32, $self:expr, [$j0:expr, $j1:expr]) => { |
48 | const JUMP: [u32; 2] = [$j0, $j1]; |
49 | let mut s0 = 0; |
50 | let mut s1 = 0; |
51 | for j in &JUMP { |
52 | for b in 0..32 { |
53 | if (j & 1 << b) != 0 { |
54 | s0 ^= $self.s0; |
55 | s1 ^= $self.s1; |
56 | } |
57 | $self.next_u32(); |
58 | } |
59 | } |
60 | $self.s0 = s0; |
61 | $self.s1 = s1; |
62 | }; |
63 | (u64, $self:expr, [$j0:expr, $j1:expr]) => { |
64 | const JUMP: [u64; 2] = [$j0, $j1]; |
65 | let mut s0 = 0; |
66 | let mut s1 = 0; |
67 | for j in &JUMP { |
68 | for b in 0..64 { |
69 | if (j & 1 << b) != 0 { |
70 | s0 ^= $self.s0; |
71 | s1 ^= $self.s1; |
72 | } |
73 | $self.next_u64(); |
74 | } |
75 | } |
76 | $self.s0 = s0; |
77 | $self.s1 = s1; |
78 | }; |
79 | (u32, $self:expr, [$j0:expr, $j1:expr, $j2:expr, $j3:expr]) => { |
80 | const JUMP: [u32; 4] = [$j0, $j1, $j2, $j3]; |
81 | let mut s0 = 0; |
82 | let mut s1 = 0; |
83 | let mut s2 = 0; |
84 | let mut s3 = 0; |
85 | for j in &JUMP { |
86 | for b in 0..32 { |
87 | if (j & 1 << b) != 0 { |
88 | s0 ^= $self.s[0]; |
89 | s1 ^= $self.s[1]; |
90 | s2 ^= $self.s[2]; |
91 | s3 ^= $self.s[3]; |
92 | } |
93 | $self.next_u32(); |
94 | } |
95 | } |
96 | $self.s[0] = s0; |
97 | $self.s[1] = s1; |
98 | $self.s[2] = s2; |
99 | $self.s[3] = s3; |
100 | }; |
101 | (u64, $self:expr, [$j0:expr, $j1:expr, $j2:expr, $j3:expr]) => { |
102 | const JUMP: [u64; 4] = [$j0, $j1, $j2, $j3]; |
103 | let mut s0 = 0; |
104 | let mut s1 = 0; |
105 | let mut s2 = 0; |
106 | let mut s3 = 0; |
107 | for j in &JUMP { |
108 | for b in 0..64 { |
109 | if (j & 1 << b) != 0 { |
110 | s0 ^= $self.s[0]; |
111 | s1 ^= $self.s[1]; |
112 | s2 ^= $self.s[2]; |
113 | s3 ^= $self.s[3]; |
114 | } |
115 | $self.next_u64(); |
116 | } |
117 | } |
118 | $self.s[0] = s0; |
119 | $self.s[1] = s1; |
120 | $self.s[2] = s2; |
121 | $self.s[3] = s3; |
122 | }; |
123 | (u64, $self:expr, [$j0:expr, $j1:expr, $j2:expr, $j3:expr, |
124 | $j4:expr, $j5:expr, $j6:expr, $j7:expr]) => { |
125 | const JUMP: [u64; 8] = [$j0, $j1, $j2, $j3, $j4, $j5, $j6, $j7]; |
126 | let mut s = [0; 8]; |
127 | for j in &JUMP { |
128 | for b in 0..64 { |
129 | if (j & 1 << b) != 0 { |
130 | s[0] ^= $self.s[0]; |
131 | s[1] ^= $self.s[1]; |
132 | s[2] ^= $self.s[2]; |
133 | s[3] ^= $self.s[3]; |
134 | s[4] ^= $self.s[4]; |
135 | s[5] ^= $self.s[5]; |
136 | s[6] ^= $self.s[6]; |
137 | s[7] ^= $self.s[7]; |
138 | } |
139 | $self.next_u64(); |
140 | } |
141 | } |
142 | $self.s = s; |
143 | }; |
144 | } |
145 | |
/// Implement the xoroshiro iteration for 32-bit state words.
147 | macro_rules! impl_xoroshiro_u32 { |
148 | ($self:expr) => { |
149 | $self.s1 ^= $self.s0; |
150 | $self.s0 = $self.s0.rotate_left(26) ^ $self.s1 ^ ($self.s1 << 9); |
151 | $self.s1 = $self.s1.rotate_left(13); |
152 | } |
153 | } |
154 | |
/// Implement the xoroshiro iteration for 64-bit state words.
156 | macro_rules! impl_xoroshiro_u64 { |
157 | ($self:expr) => { |
158 | $self.s1 ^= $self.s0; |
159 | $self.s0 = $self.s0.rotate_left(24) ^ $self.s1 ^ ($self.s1 << 16); |
160 | $self.s1 = $self.s1.rotate_left(37); |
161 | } |
162 | } |
163 | |
/// Implement the xoroshiro iteration for 64-bit state words with the
/// constants used by the ++ variant.
165 | macro_rules! impl_xoroshiro_u64_plusplus { |
166 | ($self:expr) => { |
167 | $self.s1 ^= $self.s0; |
168 | $self.s0 = $self.s0.rotate_left(49) ^ $self.s1 ^ ($self.s1 << 21); |
169 | $self.s1 = $self.s1.rotate_left(28); |
170 | } |
171 | } |
172 | |
173 | /// Implement the xoshiro iteration for `u32` output. |
174 | macro_rules! impl_xoshiro_u32 { |
175 | ($self:expr) => { |
176 | let t = $self.s[1] << 9; |
177 | |
178 | $self.s[2] ^= $self.s[0]; |
179 | $self.s[3] ^= $self.s[1]; |
180 | $self.s[1] ^= $self.s[2]; |
181 | $self.s[0] ^= $self.s[3]; |
182 | |
183 | $self.s[2] ^= t; |
184 | |
185 | $self.s[3] = $self.s[3].rotate_left(11); |
186 | } |
187 | } |
188 | |
189 | /// Implement the xoshiro iteration for `u64` output. |
190 | macro_rules! impl_xoshiro_u64 { |
191 | ($self:expr) => { |
192 | let t = $self.s[1] << 17; |
193 | |
194 | $self.s[2] ^= $self.s[0]; |
195 | $self.s[3] ^= $self.s[1]; |
196 | $self.s[1] ^= $self.s[2]; |
197 | $self.s[0] ^= $self.s[3]; |
198 | |
199 | $self.s[2] ^= t; |
200 | |
201 | $self.s[3] = $self.s[3].rotate_left(45); |
202 | } |
203 | } |
204 | |
/// Implement the xoshiro iteration for the large-state (eight-word) variants.
206 | macro_rules! impl_xoshiro_large { |
207 | ($self:expr) => { |
208 | let t = $self.s[1] << 11; |
209 | |
210 | $self.s[2] ^= $self.s[0]; |
211 | $self.s[5] ^= $self.s[1]; |
212 | $self.s[1] ^= $self.s[2]; |
213 | $self.s[7] ^= $self.s[3]; |
214 | $self.s[3] ^= $self.s[4]; |
215 | $self.s[4] ^= $self.s[5]; |
216 | $self.s[0] ^= $self.s[6]; |
217 | $self.s[6] ^= $self.s[7]; |
218 | |
219 | $self.s[6] ^= t; |
220 | |
221 | $self.s[7] = $self.s[7].rotate_left(21); |
222 | } |
223 | } |
224 | |
225 | /// Map an all-zero seed to a different one. |
226 | macro_rules! deal_with_zero_seed { |
227 | ($seed:expr, $Self:ident) => { |
228 | if $seed.iter().all(|&x| x == 0) { |
229 | return $Self::seed_from_u64(0); |
230 | } |
231 | } |
232 | } |
233 | |
234 | /// 512-bit seed for a generator. |
235 | /// |
/// This wrapper is necessary because some of the traits required for a seed
/// are not implemented for large arrays.
#[derive(Clone)]
239 | pub struct Seed512(pub [u8; 64]); |
240 | |
241 | impl Seed512 { |
242 | /// Return an iterator over the seed. |
243 | pub fn iter(&self) -> core::slice::Iter<u8> { |
244 | self.0.iter() |
245 | } |
246 | } |
247 | |
248 | impl core::fmt::Debug for Seed512 { |
249 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { |
250 | self.0[..].fmt(f) |
251 | } |
252 | } |
253 | |
254 | impl Default for Seed512 { |
255 | fn default() -> Seed512 { |
256 | Seed512([0; 64]) |
257 | } |
258 | } |
259 | |
260 | impl AsMut<[u8]> for Seed512 { |
261 | fn as_mut(&mut self) -> &mut [u8] { |
262 | &mut self.0 |
263 | } |
264 | } |
265 | |
266 | |