// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The implementations of the `Standard` distribution for integer types.
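//!
//! # Example
//!
//! A minimal usage sketch (assuming the public `rand` crate facade with its
//! default features): both `Rng::gen` and `Rng::sample` draw from `Standard`.
//!
//! ```
//! use rand::prelude::*;
//! use rand::distributions::Standard;
//!
//! let mut rng = thread_rng();
//! let x: u8 = rng.gen(); // uses `Distribution<u8> for Standard`
//! let y: u64 = rng.sample(Standard); // explicit distribution
//! ```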

use crate::distributions::{Distribution, Standard};
use crate::Rng;
#[cfg(all(target_arch = "x86", feature = "simd_support"))]
use core::arch::x86::{__m128i, __m256i};
#[cfg(all(target_arch = "x86_64", feature = "simd_support"))]
use core::arch::x86_64::{__m128i, __m256i};
use core::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize};
#[cfg(feature = "simd_support")] use packed_simd::*;

impl Distribution<u8> for Standard {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
        rng.next_u32() as u8
    }
}

impl Distribution<u16> for Standard {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
        rng.next_u32() as u16
    }
}

impl Distribution<u32> for Standard {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
        rng.next_u32()
    }
}

impl Distribution<u64> for Standard {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
        rng.next_u64()
    }
}

impl Distribution<u128> for Standard {
    #[inline]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
        // Use LE: the first `next_u64` call fills the low 64 bits and the
        // second fills the high 64 bits; generating the two halves as
        // separate statements makes this order explicit.
        let x = u128::from(rng.next_u64());
        let y = u128::from(rng.next_u64());
        (y << 64) | x
    }
}

impl Distribution<usize> for Standard {
    #[inline]
    #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
        rng.next_u32() as usize
    }

    #[inline]
    #[cfg(target_pointer_width = "64")]
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
        rng.next_u64() as usize
    }
}

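// Signed integers are generated by sampling the same-width unsigned type and
// reinterpreting the bits with `as`, so every signed value remains equally
// likely (e.g. a sampled `0xFFu8` maps to `-1i8`).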
macro_rules! impl_int_from_uint {
    ($ty:ty, $uty:ty) => {
        impl Distribution<$ty> for Standard {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                rng.gen::<$uty>() as $ty
            }
        }
    };
}

impl_int_from_uint! { i8, u8 }
impl_int_from_uint! { i16, u16 }
impl_int_from_uint! { i32, u32 }
impl_int_from_uint! { i64, u64 }
impl_int_from_uint! { i128, u128 }
impl_int_from_uint! { isize, usize }

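// NonZero* types are sampled by rejection: draw the underlying integer and
// retry on zero. For an n-bit type the retry probability is 2^-n per draw,
// so the loop terminates after a single iteration almost always.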
macro_rules! impl_nzint {
    ($ty:ty, $new:path) => {
        impl Distribution<$ty> for Standard {
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                loop {
                    if let Some(nz) = $new(rng.gen()) {
                        break nz;
                    }
                }
            }
        }
    };
}

impl_nzint!(NonZeroU8, NonZeroU8::new);
impl_nzint!(NonZeroU16, NonZeroU16::new);
impl_nzint!(NonZeroU32, NonZeroU32::new);
impl_nzint!(NonZeroU64, NonZeroU64::new);
impl_nzint!(NonZeroU128, NonZeroU128::new);
impl_nzint!(NonZeroUsize, NonZeroUsize::new);

#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
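    // Arm 1: x86/x86_64 intrinsic types are produced by sampling a
    // `packed_simd` vector of the same width and bit-casting it via
    // `from_bits`.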
    ($(($intrinsic:ident, $vec:ty),)+) => {$(
        impl Distribution<$intrinsic> for Standard {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $intrinsic {
                $intrinsic::from_bits(rng.gen::<$vec>())
            }
        }
    )+};

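    // Arm 2: for each listed `packed_simd` vector type, fill the vector's
    // bytes directly from the RNG, then call `to_le()` so the lane values
    // are reproducible regardless of the target's endianness.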
    ($bits:expr,) => {};
    ($bits:expr, $ty:ty, $($ty_more:ty,)*) => {
        simd_impl!($bits, $($ty_more,)*);

        impl Distribution<$ty> for Standard {
            #[inline]
            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
                let mut vec: $ty = Default::default();
                unsafe {
                    let ptr = &mut vec;
                    let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]);
                    rng.fill_bytes(b_ptr);
                }
                vec.to_le()
            }
        }
    };
}

#[cfg(feature = "simd_support")]
simd_impl!(16, u8x2, i8x2,);
#[cfg(feature = "simd_support")]
simd_impl!(32, u8x4, i8x4, u16x2, i16x2,);
#[cfg(feature = "simd_support")]
simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,);
#[cfg(feature = "simd_support")]
simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,);
#[cfg(feature = "simd_support")]
simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,);
#[cfg(feature = "simd_support")]
simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,);
#[cfg(all(
    feature = "simd_support",
    any(target_arch = "x86", target_arch = "x86_64")
))]
simd_impl!((__m128i, u8x16), (__m256i, u8x32),);

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_integers() {
        let mut rng = crate::test::rng(806);

        rng.sample::<isize, _>(Standard);
        rng.sample::<i8, _>(Standard);
        rng.sample::<i16, _>(Standard);
        rng.sample::<i32, _>(Standard);
        rng.sample::<i64, _>(Standard);
        rng.sample::<i128, _>(Standard);

        rng.sample::<usize, _>(Standard);
        rng.sample::<u8, _>(Standard);
        rng.sample::<u16, _>(Standard);
        rng.sample::<u32, _>(Standard);
        rng.sample::<u64, _>(Standard);
        rng.sample::<u128, _>(Standard);
    }
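
    #[test]
    fn test_non_zero_integers() {
        // A small smoke test of the `NonZero*` rejection-sampling impls
        // above: by construction the result can never be zero, so this only
        // checks that sampling succeeds (the seed 808 is arbitrary).
        use core::num::{NonZeroU64, NonZeroU8};
        let mut rng = crate::test::rng(808);
        for _ in 0..20 {
            assert_ne!(rng.sample::<NonZeroU8, _>(Standard).get(), 0);
            assert_ne!(rng.sample::<NonZeroU64, _>(Standard).get(), 0);
        }
    }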

    #[test]
    fn value_stability() {
        fn test_samples<T: Copy + core::fmt::Debug + PartialEq>(zero: T, expected: &[T])
        where Standard: Distribution<T> {
            let mut rng = crate::test::rng(807);
            let mut buf = [zero; 3];
            for x in &mut buf {
                *x = rng.sample(Standard);
            }
            assert_eq!(&buf, expected);
        }

        test_samples(0u8, &[9, 247, 111]);
        test_samples(0u16, &[32265, 42999, 38255]);
        test_samples(0u32, &[2220326409, 2575017975, 2018088303]);
        test_samples(0u64, &[
            11059617991457472009,
            16096616328739788143,
            1487364411147516184,
        ]);
        test_samples(0u128, &[
            296930161868957086625409848350820761097,
            145644820879247630242265036535529306392,
            111087889832015897993126088499035356354,
        ]);
        #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
        test_samples(0usize, &[2220326409, 2575017975, 2018088303]);
        #[cfg(target_pointer_width = "64")]
        test_samples(0usize, &[
            11059617991457472009,
            16096616328739788143,
            1487364411147516184,
        ]);

        test_samples(0i8, &[9, -9, 111]);
        // Skip further i* types: they are simple reinterpretations of u* samples.

        #[cfg(feature = "simd_support")]
        {
            // We only test a subset of the types here and make assumptions about the rest.

            test_samples(u8x2::default(), &[
                u8x2::new(9, 126),
                u8x2::new(247, 167),
                u8x2::new(111, 149),
            ]);
            test_samples(u8x4::default(), &[
                u8x4::new(9, 126, 87, 132),
                u8x4::new(247, 167, 123, 153),
                u8x4::new(111, 149, 73, 120),
            ]);
            test_samples(u8x8::default(), &[
                u8x8::new(9, 126, 87, 132, 247, 167, 123, 153),
                u8x8::new(111, 149, 73, 120, 68, 171, 98, 223),
                u8x8::new(24, 121, 1, 50, 13, 46, 164, 20),
            ]);

            test_samples(i64x8::default(), &[
                i64x8::new(
                    -7387126082252079607,
                    -2350127744969763473,
                    1487364411147516184,
                    7895421560427121838,
                    602190064936008898,
                    6022086574635100741,
                    -5080089175222015595,
                    -4066367846667249123,
                ),
                i64x8::new(
                    9180885022207963908,
                    3095981199532211089,
                    6586075293021332726,
                    419343203796414657,
                    3186951873057035255,
                    5287129228749947252,
                    444726432079249540,
                    -1587028029513790706,
                ),
                i64x8::new(
                    6075236523189346388,
                    1351763722368165432,
                    -6192309979959753740,
                    -7697775502176768592,
                    -4482022114172078123,
                    7522501477800909500,
                    -1837258847956201231,
                    -586926753024886735,
                ),
            ]);
        }
    }
}