1// Copyright 2018-2020 Developers of the Rand project.
2// Copyright 2017 The Rust Project Developers.
3//
4// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
5// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
7// option. This file may not be copied, modified, or distributed
8// except according to those terms.
9
10//! A distribution uniformly sampling numbers within a given range.
11//!
12//! [`Uniform`] is the standard distribution to sample uniformly from a range;
13//! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
14//! standard die. [`Rng::gen_range`] supports any type supported by
15//! [`Uniform`].
16//!
17//! This distribution is provided with support for several primitive types
18//! (all integer and floating-point types) as well as [`std::time::Duration`],
19//! and supports extension to user-defined types via a type-specific *back-end*
20//! implementation.
21//!
22//! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
23//! back-ends supporting sampling from primitive integer and floating-point
24//! ranges as well as from [`std::time::Duration`]; these types do not normally
25//! need to be used directly (unless implementing a derived back-end).
26//!
27//! # Example usage
28//!
29//! ```
30//! use rand::{Rng, thread_rng};
31//! use rand::distributions::Uniform;
32//!
33//! let mut rng = thread_rng();
34//! let side = Uniform::new(-10.0, 10.0);
35//!
36//! // sample between 1 and 10 points
37//! for _ in 0..rng.gen_range(1..=10) {
//!     // sample a point from the square with sides from -10 to 10 in two dimensions
39//! let (x, y) = (rng.sample(side), rng.sample(side));
40//! println!("Point: {}, {}", x, y);
41//! }
42//! ```
43//!
44//! # Extending `Uniform` to support a custom type
45//!
46//! To extend [`Uniform`] to support your own types, write a back-end which
47//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
48//! helper trait to "register" your back-end. See the `MyF32` example below.
49//!
50//! At a minimum, the back-end needs to store any parameters needed for sampling
51//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
//! Those methods should assert that the range is valid (i.e. `low < high` for
//! `new` and `low <= high` for `new_inclusive`). The example below merely
//! wraps another back-end.
54//!
//! The `new`, `new_inclusive` and `sample_single` functions use arguments of
//! type `SampleBorrow<X>` to support passing bounds either by value or by
//! reference. In the implementation of these functions, you may either use the
//! reference returned by [`SampleBorrow::borrow`] directly, or copy or clone
//! the value, whichever is appropriate for your type.
60//!
61//! ```
62//! use rand::prelude::*;
63//! use rand::distributions::uniform::{Uniform, SampleUniform,
64//! UniformSampler, UniformFloat, SampleBorrow};
65//!
66//! struct MyF32(f32);
67//!
68//! #[derive(Clone, Copy, Debug)]
69//! struct UniformMyF32(UniformFloat<f32>);
70//!
71//! impl UniformSampler for UniformMyF32 {
72//! type X = MyF32;
73//! fn new<B1, B2>(low: B1, high: B2) -> Self
74//! where B1: SampleBorrow<Self::X> + Sized,
75//! B2: SampleBorrow<Self::X> + Sized
76//! {
77//! UniformMyF32(UniformFloat::<f32>::new(low.borrow().0, high.borrow().0))
78//! }
79//! fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
80//! where B1: SampleBorrow<Self::X> + Sized,
81//! B2: SampleBorrow<Self::X> + Sized
82//! {
83//! UniformMyF32(UniformFloat::<f32>::new_inclusive(
84//! low.borrow().0,
85//! high.borrow().0,
86//! ))
87//! }
88//! fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
89//! MyF32(self.0.sample(rng))
90//! }
91//! }
92//!
93//! impl SampleUniform for MyF32 {
94//! type Sampler = UniformMyF32;
95//! }
96//!
97//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
98//! let uniform = Uniform::new(low, high);
99//! let x = uniform.sample(&mut thread_rng());
100//! ```
101//!
102//! [`SampleUniform`]: crate::distributions::uniform::SampleUniform
103//! [`UniformSampler`]: crate::distributions::uniform::UniformSampler
104//! [`UniformInt`]: crate::distributions::uniform::UniformInt
105//! [`UniformFloat`]: crate::distributions::uniform::UniformFloat
106//! [`UniformDuration`]: crate::distributions::uniform::UniformDuration
107//! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow
108
109use core::time::Duration;
110use core::ops::{Range, RangeInclusive};
111
112use crate::distributions::float::IntoFloat;
113use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, WideningMultiply};
114use crate::distributions::Distribution;
115use crate::{Rng, RngCore};
116
117#[cfg(not(feature = "std"))]
118#[allow(unused_imports)] // rustc doesn't detect that this is actually used
119use crate::distributions::utils::Float;
120
121#[cfg(feature = "simd_support")] use packed_simd::*;
122
123#[cfg(feature = "serde1")]
124use serde::{Serialize, Deserialize};
125
126/// Sample values uniformly between two bounds.
127///
128/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
129/// distribution sampling from the given range; these functions may do extra
130/// work up front to make sampling of multiple values faster. If only one sample
131/// from the range is required, [`Rng::gen_range`] can be more efficient.
132///
133/// When sampling from a constant range, many calculations can happen at
134/// compile-time and all methods should be fast; for floating-point ranges and
135/// the full range of integer types this should have comparable performance to
136/// the `Standard` distribution.
137///
138/// Steps are taken to avoid bias which might be present in naive
139/// implementations; for example `rng.gen::<u8>() % 170` samples from the range
/// `[0, 169]` but is twice as likely to select numbers less than 85 as it is
/// to select other values. Further, the implementations here give more weight
/// to the high bits generated by the RNG than to the low bits, since with some
/// RNGs the low bits are of lower quality than the high bits.
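///
/// A quick way to see the bias of `% 170` on a `u8` is to count how many of
/// the 256 inputs map to each output (a small illustrative check, not part of
/// this crate's API):
///
/// ```
/// let mut counts = [0u32; 170];
/// for x in 0u16..256 {
///     counts[(x % 170) as usize] += 1;
/// }
/// assert_eq!(counts[84], 2); // low outputs are produced by two inputs each
/// assert_eq!(counts[86], 1); // high outputs by only one
/// ```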
144///
/// Implementations must sample from the `[low, high)` range for
/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular, care must
/// be taken to ensure that rounding never results in values `< low` or
/// `>= high`.
148///
149/// # Example
150///
151/// ```
152/// use rand::distributions::{Distribution, Uniform};
153///
154/// let between = Uniform::from(10..10000);
155/// let mut rng = rand::thread_rng();
156/// let mut sum = 0;
157/// for _ in 0..1000 {
158/// sum += between.sample(&mut rng);
159/// }
160/// println!("{}", sum);
161/// ```
162///
163/// For a single sample, [`Rng::gen_range`] may be preferred:
164///
165/// ```
166/// use rand::Rng;
167///
168/// let mut rng = rand::thread_rng();
169/// println!("{}", rng.gen_range(0..10));
170/// ```
171///
172/// [`new`]: Uniform::new
173/// [`new_inclusive`]: Uniform::new_inclusive
174/// [`Rng::gen_range`]: Rng::gen_range
175#[derive(Clone, Copy, Debug, PartialEq)]
176#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
177#[cfg_attr(feature = "serde1", serde(bound(serialize = "X::Sampler: Serialize")))]
178#[cfg_attr(feature = "serde1", serde(bound(deserialize = "X::Sampler: Deserialize<'de>")))]
179pub struct Uniform<X: SampleUniform>(X::Sampler);
180
181impl<X: SampleUniform> Uniform<X> {
    /// Create a new `Uniform` instance which samples uniformly from the
    /// half-open range `[low, high)` (excluding `high`). Panics if `low >= high`.
184 pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
185 where
186 B1: SampleBorrow<X> + Sized,
187 B2: SampleBorrow<X> + Sized,
188 {
189 Uniform(X::Sampler::new(low, high))
190 }
191
192 /// Create a new `Uniform` instance which samples uniformly from the closed
193 /// range `[low, high]` (inclusive). Panics if `low > high`.
194 pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
195 where
196 B1: SampleBorrow<X> + Sized,
197 B2: SampleBorrow<X> + Sized,
198 {
199 Uniform(X::Sampler::new_inclusive(low, high))
200 }
201}
202
203impl<X: SampleUniform> Distribution<X> for Uniform<X> {
204 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
205 self.0.sample(rng)
206 }
207}
208
209/// Helper trait for creating objects using the correct implementation of
210/// [`UniformSampler`] for the sampling type.
211///
212/// See the [module documentation] on how to implement [`Uniform`] range
213/// sampling for a custom type.
214///
215/// [module documentation]: crate::distributions::uniform
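///
/// # Example
///
/// A sketch of a generic helper bounded on `SampleUniform` (the function name
/// `sample_between` is made up for illustration):
///
/// ```
/// use rand::distributions::uniform::SampleUniform;
/// use rand::distributions::{Distribution, Uniform};
///
/// fn sample_between<T: SampleUniform>(low: T, high: T) -> T {
///     Uniform::new(low, high).sample(&mut rand::thread_rng())
/// }
///
/// let x = sample_between(1u8, 7);
/// assert!(x >= 1 && x < 7);
/// ```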
216pub trait SampleUniform: Sized {
217 /// The `UniformSampler` implementation supporting type `X`.
218 type Sampler: UniformSampler<X = Self>;
219}
220
221/// Helper trait handling actual uniform sampling.
222///
223/// See the [module documentation] on how to implement [`Uniform`] range
224/// sampling for a custom type.
225///
226/// Implementation of [`sample_single`] is optional, and is only useful when
227/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
228///
229/// [module documentation]: crate::distributions::uniform
230/// [`sample_single`]: UniformSampler::sample_single
231pub trait UniformSampler: Sized {
232 /// The type sampled by this implementation.
233 type X;
234
235 /// Construct self, with inclusive lower bound and exclusive upper bound
236 /// `[low, high)`.
237 ///
238 /// Usually users should not call this directly but instead use
239 /// `Uniform::new`, which asserts that `low < high` before calling this.
240 fn new<B1, B2>(low: B1, high: B2) -> Self
241 where
242 B1: SampleBorrow<Self::X> + Sized,
243 B2: SampleBorrow<Self::X> + Sized;
244
245 /// Construct self, with inclusive bounds `[low, high]`.
246 ///
247 /// Usually users should not call this directly but instead use
248 /// `Uniform::new_inclusive`, which asserts that `low <= high` before
249 /// calling this.
250 fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
251 where
252 B1: SampleBorrow<Self::X> + Sized,
253 B2: SampleBorrow<Self::X> + Sized;
254
255 /// Sample a value.
256 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;
257
258 /// Sample a single value uniformly from a range with inclusive lower bound
259 /// and exclusive upper bound `[low, high)`.
260 ///
261 /// By default this is implemented using
    /// `UniformSampler::new(low, high).sample(rng)`. However, for some types
    /// a more efficient single-use implementation may be provided via this
    /// method (as is the case for integers and floats).
    /// Results may not be identical.
266 ///
267 /// Note that to use this method in a generic context, the type needs to be
268 /// retrieved via `SampleUniform::Sampler` as follows:
269 /// ```
270 /// use rand::{thread_rng, distributions::uniform::{SampleUniform, UniformSampler}};
271 /// # #[allow(unused)]
272 /// fn sample_from_range<T: SampleUniform>(lb: T, ub: T) -> T {
273 /// let mut rng = thread_rng();
274 /// <T as SampleUniform>::Sampler::sample_single(lb, ub, &mut rng)
275 /// }
276 /// ```
277 fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
278 where
279 B1: SampleBorrow<Self::X> + Sized,
280 B2: SampleBorrow<Self::X> + Sized,
281 {
282 let uniform: Self = UniformSampler::new(low, high);
283 uniform.sample(rng)
284 }
285
286 /// Sample a single value uniformly from a range with inclusive lower bound
287 /// and inclusive upper bound `[low, high]`.
288 ///
289 /// By default this is implemented using
    /// `UniformSampler::new_inclusive(low, high).sample(rng)`. However, for
    /// some types a more efficient single-use implementation may be provided
    /// via this method.
293 /// Results may not be identical.
294 fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R)
295 -> Self::X
296 where B1: SampleBorrow<Self::X> + Sized,
297 B2: SampleBorrow<Self::X> + Sized
298 {
299 let uniform: Self = UniformSampler::new_inclusive(low, high);
300 uniform.sample(rng)
301 }
302}
303
304impl<X: SampleUniform> From<Range<X>> for Uniform<X> {
305 fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
306 Uniform::new(r.start, r.end)
307 }
308}
309
310impl<X: SampleUniform> From<RangeInclusive<X>> for Uniform<X> {
311 fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
312 Uniform::new_inclusive(r.start(), r.end())
313 }
314}
315
316
/// Helper trait similar to [`Borrow`] but implemented
/// only for [`SampleUniform`] and references to [`SampleUniform`] in
/// order to resolve ambiguity issues.
320///
321/// [`Borrow`]: std::borrow::Borrow
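///
/// # Example
///
/// A small sketch showing that, thanks to this trait, range bounds may be
/// passed either by value or by reference:
///
/// ```
/// use rand::distributions::Uniform;
///
/// let high = 10;
/// let by_value = Uniform::new(0, high);
/// let by_ref = Uniform::new(0, &high);
/// # let _ = (by_value, by_ref);
/// ```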
322pub trait SampleBorrow<Borrowed> {
323 /// Immutably borrows from an owned value. See [`Borrow::borrow`]
324 ///
325 /// [`Borrow::borrow`]: std::borrow::Borrow::borrow
326 fn borrow(&self) -> &Borrowed;
327}
328impl<Borrowed> SampleBorrow<Borrowed> for Borrowed
329where Borrowed: SampleUniform
330{
331 #[inline(always)]
332 fn borrow(&self) -> &Borrowed {
333 self
334 }
335}
336impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed
337where Borrowed: SampleUniform
338{
339 #[inline(always)]
340 fn borrow(&self) -> &Borrowed {
341 *self
342 }
343}
344
345/// Range that supports generating a single sample efficiently.
346///
347/// Any type implementing this trait can be used to specify the sampled range
348/// for `Rng::gen_range`.
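///
/// # Example
///
/// Both `Range` and `RangeInclusive` implement this trait, so either form may
/// be passed to `Rng::gen_range`:
///
/// ```
/// use rand::Rng;
///
/// let mut rng = rand::thread_rng();
/// let a = rng.gen_range(0..10);
/// let b = rng.gen_range(0..=9);
/// assert!(a < 10 && b < 10);
/// ```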
349pub trait SampleRange<T> {
350 /// Generate a sample from the given range.
351 fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T;
352
353 /// Check whether the range is empty.
354 fn is_empty(&self) -> bool;
355}
356
357impl<T: SampleUniform + PartialOrd> SampleRange<T> for Range<T> {
358 #[inline]
359 fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
360 T::Sampler::sample_single(self.start, self.end, rng)
361 }
362
363 #[inline]
364 fn is_empty(&self) -> bool {
365 !(self.start < self.end)
366 }
367}
368
369impl<T: SampleUniform + PartialOrd> SampleRange<T> for RangeInclusive<T> {
370 #[inline]
371 fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
372 T::Sampler::sample_single_inclusive(self.start(), self.end(), rng)
373 }
374
375 #[inline]
376 fn is_empty(&self) -> bool {
377 !(self.start() <= self.end())
378 }
379}
380
381
382////////////////////////////////////////////////////////////////////////////////
383
384// What follows are all back-ends.
385
386
387/// The back-end implementing [`UniformSampler`] for integer types.
388///
389/// Unless you are implementing [`UniformSampler`] for your own type, this type
390/// should not be used directly, use [`Uniform`] instead.
391///
392/// # Implementation notes
393///
/// For simplicity, we use the same generic struct `UniformInt<X>` for all
/// integer types `X`. This gives us only one field type, `X`; to store unsigned
/// values of this size, we rely on the fact that conversions between `X` and
/// its unsigned counterpart are no-ops.
397///
/// For a closed range, the number of possible values we should generate is
/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
400/// our sample space, `zone`, is a multiple of `range`; other values must be
401/// rejected (by replacing with a new random sample).
402///
403/// As a special case, we use `range = 0` to represent the full range of the
404/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
405///
/// The optimal `zone` is the largest multiple of `range` which fits in our
/// (unsigned) target type. We find it by computing how many numbers we must
/// reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
/// multiple of `range` will suffice, thus in `sample_single` we multiply by a
/// power of 2 via bit-shifting (faster but may cause more rejections).
411///
/// The smallest integer type our PRNGs generate is `u32`. For 8- and 16-bit
/// outputs we therefore use `u32` for our `zone` and samples (it is no slower
/// and it reduces the chance of having to reject a sample). In this case we
/// cannot store `zone` in the target type since it is too large, but we know
/// `ints_to_reject < range <= $unsigned::MAX`.
417///
/// An alternative to using a modulus is widening multiply: after a widening
/// multiply by `range`, the result is in the high word. Comparing the low word
/// against `zone` then ensures our distribution is uniform.
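///
/// As a small illustration of this widening-multiply rejection technique, the
/// following sketch uses plain `u64` arithmetic rather than the
/// macro-generated code in this module:
///
/// ```
/// // Map a random `u32` into `[0, range)` without modulo bias.
/// fn widening_mul_reject(mut next: impl FnMut() -> u32, range: u32) -> u32 {
///     assert!(range > 0);
///     let ints_to_reject = (core::u32::MAX - range + 1) % range;
///     let zone = (core::u32::MAX - ints_to_reject) as u64;
///     loop {
///         let m = next() as u64 * range as u64; // widening multiply
///         let (hi, lo) = ((m >> 32) as u32, m & 0xffff_ffff);
///         if lo <= zone {
///             return hi; // uniform in [0, range)
///         }
///         // otherwise the sample is biased; reject and try again
///     }
/// }
/// # let mut x = 0u32;
/// # let v = widening_mul_reject(|| { x = x.wrapping_add(0x9E37_79B9); x }, 6);
/// # assert!(v < 6);
/// ```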
421#[derive(Clone, Copy, Debug, PartialEq)]
422#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
423pub struct UniformInt<X> {
424 low: X,
425 range: X,
426 z: X, // either ints_to_reject or zone depending on implementation
427}
428
429macro_rules! uniform_int_impl {
430 ($ty:ty, $unsigned:ident, $u_large:ident) => {
431 impl SampleUniform for $ty {
432 type Sampler = UniformInt<$ty>;
433 }
434
435 impl UniformSampler for UniformInt<$ty> {
            // We play fast and loose with unsigned vs. signed here
437 // (when $ty is signed), but that's fine, since the
438 // contract of this macro is for $ty and $unsigned to be
439 // "bit-equal", so casting between them is a no-op.
440
441 type X = $ty;
442
443 #[inline] // if the range is constant, this helps LLVM to do the
444 // calculations at compile-time.
445 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
446 where
447 B1: SampleBorrow<Self::X> + Sized,
448 B2: SampleBorrow<Self::X> + Sized,
449 {
450 let low = *low_b.borrow();
451 let high = *high_b.borrow();
452 assert!(low < high, "Uniform::new called with `low >= high`");
453 UniformSampler::new_inclusive(low, high - 1)
454 }
455
456 #[inline] // if the range is constant, this helps LLVM to do the
457 // calculations at compile-time.
458 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
459 where
460 B1: SampleBorrow<Self::X> + Sized,
461 B2: SampleBorrow<Self::X> + Sized,
462 {
463 let low = *low_b.borrow();
464 let high = *high_b.borrow();
465 assert!(
466 low <= high,
467 "Uniform::new_inclusive called with `low > high`"
468 );
469 let unsigned_max = ::core::$u_large::MAX;
470
471 let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
472 let ints_to_reject = if range > 0 {
473 let range = $u_large::from(range);
474 (unsigned_max - range + 1) % range
475 } else {
476 0
477 };
478
479 UniformInt {
480 low,
481 // These are really $unsigned values, but store as $ty:
482 range: range as $ty,
483 z: ints_to_reject as $unsigned as $ty,
484 }
485 }
486
487 #[inline]
488 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
489 let range = self.range as $unsigned as $u_large;
490 if range > 0 {
491 let unsigned_max = ::core::$u_large::MAX;
492 let zone = unsigned_max - (self.z as $unsigned as $u_large);
493 loop {
494 let v: $u_large = rng.gen();
495 let (hi, lo) = v.wmul(range);
496 if lo <= zone {
497 return self.low.wrapping_add(hi as $ty);
498 }
499 }
500 } else {
501 // Sample from the entire integer range.
502 rng.gen()
503 }
504 }
505
506 #[inline]
507 fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
508 where
509 B1: SampleBorrow<Self::X> + Sized,
510 B2: SampleBorrow<Self::X> + Sized,
511 {
512 let low = *low_b.borrow();
513 let high = *high_b.borrow();
514 assert!(low < high, "UniformSampler::sample_single: low >= high");
515 Self::sample_single_inclusive(low, high - 1, rng)
516 }
517
518 #[inline]
519 fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
520 where
521 B1: SampleBorrow<Self::X> + Sized,
522 B2: SampleBorrow<Self::X> + Sized,
523 {
524 let low = *low_b.borrow();
525 let high = *high_b.borrow();
526 assert!(low <= high, "UniformSampler::sample_single_inclusive: low > high");
527 let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large;
528 // If the above resulted in wrap-around to 0, the range is $ty::MIN..=$ty::MAX,
529 // and any integer will do.
530 if range == 0 {
531 return rng.gen();
532 }
533
534 let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
535 // Using a modulus is faster than the approximation for
536 // i8 and i16. I suppose we trade the cost of one
537 // modulus for near-perfect branch prediction.
538 let unsigned_max: $u_large = ::core::$u_large::MAX;
539 let ints_to_reject = (unsigned_max - range + 1) % range;
540 unsigned_max - ints_to_reject
541 } else {
542 // conservative but fast approximation. `- 1` is necessary to allow the
543 // same comparison without bias.
544 (range << range.leading_zeros()).wrapping_sub(1)
545 };
546
547 loop {
548 let v: $u_large = rng.gen();
549 let (hi, lo) = v.wmul(range);
550 if lo <= zone {
551 return low.wrapping_add(hi as $ty);
552 }
553 }
554 }
555 }
556 };
557}
558
559uniform_int_impl! { i8, u8, u32 }
560uniform_int_impl! { i16, u16, u32 }
561uniform_int_impl! { i32, u32, u32 }
562uniform_int_impl! { i64, u64, u64 }
563uniform_int_impl! { i128, u128, u128 }
564uniform_int_impl! { isize, usize, usize }
565uniform_int_impl! { u8, u8, u32 }
566uniform_int_impl! { u16, u16, u32 }
567uniform_int_impl! { u32, u32, u32 }
568uniform_int_impl! { u64, u64, u64 }
569uniform_int_impl! { usize, usize, usize }
570uniform_int_impl! { u128, u128, u128 }
571
572#[cfg(feature = "simd_support")]
573macro_rules! uniform_simd_int_impl {
574 ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
575 // The "pick the largest zone that can fit in an `u32`" optimization
576 // is less useful here. Multiple lanes complicate things, we don't
577 // know the PRNG's minimal output size, and casting to a larger vector
578 // is generally a bad idea for SIMD performance. The user can still
579 // implement it manually.
580
581 // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality
582 // perhaps `impl SampleUniform for $u_scalar`?
583 impl SampleUniform for $ty {
584 type Sampler = UniformInt<$ty>;
585 }
586
587 impl UniformSampler for UniformInt<$ty> {
588 type X = $ty;
589
590 #[inline] // if the range is constant, this helps LLVM to do the
591 // calculations at compile-time.
592 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
593 where B1: SampleBorrow<Self::X> + Sized,
594 B2: SampleBorrow<Self::X> + Sized
595 {
596 let low = *low_b.borrow();
597 let high = *high_b.borrow();
598 assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
599 UniformSampler::new_inclusive(low, high - 1)
600 }
601
602 #[inline] // if the range is constant, this helps LLVM to do the
603 // calculations at compile-time.
604 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
605 where B1: SampleBorrow<Self::X> + Sized,
606 B2: SampleBorrow<Self::X> + Sized
607 {
608 let low = *low_b.borrow();
609 let high = *high_b.borrow();
610 assert!(low.le(high).all(),
611 "Uniform::new_inclusive called with `low > high`");
612 let unsigned_max = ::core::$u_scalar::MAX;
613
614 // NOTE: these may need to be replaced with explicitly
615 // wrapping operations if `packed_simd` changes
616 let range: $unsigned = ((high - low) + 1).cast();
617 // `% 0` will panic at runtime.
618 let not_full_range = range.gt($unsigned::splat(0));
619 // replacing 0 with `unsigned_max` allows a faster `select`
620 // with bitwise OR
621 let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
622 // wrapping addition
623 let ints_to_reject = (unsigned_max - range + 1) % modulo;
624 // When `range` is 0, `lo` of `v.wmul(range)` will always be
625 // zero which means only one sample is needed.
626 let zone = unsigned_max - ints_to_reject;
627
628 UniformInt {
629 low,
630 // These are really $unsigned values, but store as $ty:
631 range: range.cast(),
632 z: zone.cast(),
633 }
634 }
635
636 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
637 let range: $unsigned = self.range.cast();
638 let zone: $unsigned = self.z.cast();
639
640 // This might seem very slow, generating a whole new
                // SIMD vector for every sample rejection. For most uses,
                // though, the chance of rejection is small, giving good
                // general performance. With multiple lanes, that chance is
644 // multiplied. To mitigate this, we replace only the lanes of
645 // the vector which fail, iteratively reducing the chance of
646 // rejection. The replacement method does however add a little
647 // overhead. Benchmarking or calculating probabilities might
648 // reveal contexts where this replacement method is slower.
649 let mut v: $unsigned = rng.gen();
650 loop {
651 let (hi, lo) = v.wmul(range);
652 let mask = lo.le(zone);
653 if mask.all() {
654 let hi: $ty = hi.cast();
655 // wrapping addition
656 let result = self.low + hi;
657 // `select` here compiles to a blend operation
658 // When `range.eq(0).none()` the compare and blend
659 // operations are avoided.
660 let v: $ty = v.cast();
661 return range.gt($unsigned::splat(0)).select(result, v);
662 }
663 // Replace only the failing lanes
664 v = mask.select(v, rng.gen());
665 }
666 }
667 }
668 };
669
670 // bulk implementation
671 ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
672 $(
673 uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
674 uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
675 )+
676 };
677}
678
679#[cfg(feature = "simd_support")]
680uniform_simd_int_impl! {
681 (u64x2, i64x2),
682 (u64x4, i64x4),
683 (u64x8, i64x8),
684 u64
685}
686
687#[cfg(feature = "simd_support")]
688uniform_simd_int_impl! {
689 (u32x2, i32x2),
690 (u32x4, i32x4),
691 (u32x8, i32x8),
692 (u32x16, i32x16),
693 u32
694}
695
696#[cfg(feature = "simd_support")]
697uniform_simd_int_impl! {
698 (u16x2, i16x2),
699 (u16x4, i16x4),
700 (u16x8, i16x8),
701 (u16x16, i16x16),
702 (u16x32, i16x32),
703 u16
704}
705
706#[cfg(feature = "simd_support")]
707uniform_simd_int_impl! {
708 (u8x2, i8x2),
709 (u8x4, i8x4),
710 (u8x8, i8x8),
711 (u8x16, i8x16),
712 (u8x32, i8x32),
713 (u8x64, i8x64),
714 u8
715}
716
717impl SampleUniform for char {
718 type Sampler = UniformChar;
719}
720
721/// The back-end implementing [`UniformSampler`] for `char`.
722///
723/// Unless you are implementing [`UniformSampler`] for your own type, this type
724/// should not be used directly, use [`Uniform`] instead.
725///
/// This differs from integer range sampling since the range `0xD800..=0xDFFF`
/// is reserved for surrogate pairs in UCS and UTF-16, and consequently these
/// values are not valid Unicode code points. We must therefore avoid sampling
/// values in this range.
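///
/// # Example
///
/// `char` ranges may be sampled via [`Uniform`] like any other supported type:
///
/// ```
/// use rand::distributions::{Distribution, Uniform};
///
/// let letters = Uniform::new_inclusive('a', 'z');
/// let mut rng = rand::thread_rng();
/// let c = letters.sample(&mut rng);
/// assert!(('a'..='z').contains(&c));
/// ```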
730#[derive(Clone, Copy, Debug)]
731#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
732pub struct UniformChar {
733 sampler: UniformInt<u32>,
734}
735
736/// UTF-16 surrogate range start
737const CHAR_SURROGATE_START: u32 = 0xD800;
738/// UTF-16 surrogate range size
739const CHAR_SURROGATE_LEN: u32 = 0xE000 - CHAR_SURROGATE_START;
740
741/// Convert `char` to compressed `u32`
742fn char_to_comp_u32(c: char) -> u32 {
743 match c as u32 {
744 c if c >= CHAR_SURROGATE_START => c - CHAR_SURROGATE_LEN,
745 c => c,
746 }
747}
748
749impl UniformSampler for UniformChar {
750 type X = char;
751
752 #[inline] // if the range is constant, this helps LLVM to do the
753 // calculations at compile-time.
754 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
755 where
756 B1: SampleBorrow<Self::X> + Sized,
757 B2: SampleBorrow<Self::X> + Sized,
758 {
759 let low = char_to_comp_u32(*low_b.borrow());
760 let high = char_to_comp_u32(*high_b.borrow());
761 let sampler = UniformInt::<u32>::new(low, high);
762 UniformChar { sampler }
763 }
764
765 #[inline] // if the range is constant, this helps LLVM to do the
766 // calculations at compile-time.
767 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
768 where
769 B1: SampleBorrow<Self::X> + Sized,
770 B2: SampleBorrow<Self::X> + Sized,
771 {
772 let low = char_to_comp_u32(*low_b.borrow());
773 let high = char_to_comp_u32(*high_b.borrow());
774 let sampler = UniformInt::<u32>::new_inclusive(low, high);
775 UniformChar { sampler }
776 }
777
778 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
779 let mut x = self.sampler.sample(rng);
780 if x >= CHAR_SURROGATE_START {
781 x += CHAR_SURROGATE_LEN;
782 }
783 // SAFETY: x must not be in surrogate range or greater than char::MAX.
784 // This relies on range constructors which accept char arguments.
785 // Validity of input char values is assumed.
786 unsafe { core::char::from_u32_unchecked(x) }
787 }
788}
789
790/// The back-end implementing [`UniformSampler`] for floating-point types.
791///
792/// Unless you are implementing [`UniformSampler`] for your own type, this type
793/// should not be used directly, use [`Uniform`] instead.
794///
795/// # Implementation notes
796///
/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
/// `UniformFloat` implementation converts the output of the PRNG itself. This
/// way one or two steps can be optimized out.
800///
/// The PRNG output is first converted to a value in the `[1, 2)` interval using
/// a transmute-based method, and then mapped to the expected range with a
/// multiply and an addition. Values produced this way have the equivalent of 23
/// bits of random mantissa for an `f32`, and 52 for an `f64`.
805///
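/// A minimal sketch of this conversion for `f64`, using plain `std` float
/// methods rather than the helper traits in this module:
///
/// ```
/// let x: u64 = 0x0123_4567_89AB_CDEF;          // pretend this came from a PRNG
/// let fraction = x >> (64 - 52);               // keep 52 bits for the mantissa
/// let value1_2 = f64::from_bits(fraction | (1023u64 << 52)); // exponent 0 => [1, 2)
/// let value0_1 = value1_2 - 1.0;               // map to [0, 1)
/// assert!((1.0..2.0).contains(&value1_2));
/// assert!((0.0..1.0).contains(&value0_1));
/// ```
///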
806/// [`new`]: UniformSampler::new
807/// [`new_inclusive`]: UniformSampler::new_inclusive
808/// [`Standard`]: crate::distributions::Standard
809#[derive(Clone, Copy, Debug, PartialEq)]
810#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
811pub struct UniformFloat<X> {
812 low: X,
813 scale: X,
814}
815
816macro_rules! uniform_float_impl {
817 ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
818 impl SampleUniform for $ty {
819 type Sampler = UniformFloat<$ty>;
820 }
821
822 impl UniformSampler for UniformFloat<$ty> {
823 type X = $ty;
824
825 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
826 where
827 B1: SampleBorrow<Self::X> + Sized,
828 B2: SampleBorrow<Self::X> + Sized,
829 {
830 let low = *low_b.borrow();
831 let high = *high_b.borrow();
832 debug_assert!(
833 low.all_finite(),
834 "Uniform::new called with `low` non-finite."
835 );
836 debug_assert!(
837 high.all_finite(),
838 "Uniform::new called with `high` non-finite."
839 );
840 assert!(low.all_lt(high), "Uniform::new called with `low >= high`");
841 let max_rand = <$ty>::splat(
842 (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
843 );
844
845 let mut scale = high - low;
846 assert!(scale.all_finite(), "Uniform::new: range overflow");
847
848 loop {
849 let mask = (scale * max_rand + low).ge_mask(high);
850 if mask.none() {
851 break;
852 }
853 scale = scale.decrease_masked(mask);
854 }
855
856 debug_assert!(<$ty>::splat(0.0).all_le(scale));
857
858 UniformFloat { low, scale }
859 }
860
861 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
862 where
863 B1: SampleBorrow<Self::X> + Sized,
864 B2: SampleBorrow<Self::X> + Sized,
865 {
866 let low = *low_b.borrow();
867 let high = *high_b.borrow();
868 debug_assert!(
869 low.all_finite(),
870 "Uniform::new_inclusive called with `low` non-finite."
871 );
872 debug_assert!(
873 high.all_finite(),
874 "Uniform::new_inclusive called with `high` non-finite."
875 );
876 assert!(
877 low.all_le(high),
878 "Uniform::new_inclusive called with `low > high`"
879 );
880 let max_rand = <$ty>::splat(
881 (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
882 );
883
884 let mut scale = (high - low) / max_rand;
885 assert!(scale.all_finite(), "Uniform::new_inclusive: range overflow");
886
887 loop {
888 let mask = (scale * max_rand + low).gt_mask(high);
889 if mask.none() {
890 break;
891 }
892 scale = scale.decrease_masked(mask);
893 }
894
895 debug_assert!(<$ty>::splat(0.0).all_le(scale));
896
897 UniformFloat { low, scale }
898 }
899
900 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
901 // Generate a value in the range [1, 2)
902 let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);
903
904 // Get a value in the range [0, 1) in order to avoid
905 // overflowing into infinity when multiplying with scale
906 let value0_1 = value1_2 - 1.0;
907
908 // We don't use `f64::mul_add`, because it is not available with
909 // `no_std`. Furthermore, it is slower for some targets (but
910 // faster for others). However, the order of multiplication and
911 // addition is important, because on some platforms (e.g. ARM)
912 // it will be optimized to a single (non-FMA) instruction.
913 value0_1 * self.scale + self.low
914 }
915
916 #[inline]
917 fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
918 where
919 B1: SampleBorrow<Self::X> + Sized,
920 B2: SampleBorrow<Self::X> + Sized,
921 {
922 let low = *low_b.borrow();
923 let high = *high_b.borrow();
924 debug_assert!(
925 low.all_finite(),
926 "UniformSampler::sample_single called with `low` non-finite."
927 );
928 debug_assert!(
929 high.all_finite(),
930 "UniformSampler::sample_single called with `high` non-finite."
931 );
932 assert!(
933 low.all_lt(high),
934 "UniformSampler::sample_single: low >= high"
935 );
936 let mut scale = high - low;
937 assert!(scale.all_finite(), "UniformSampler::sample_single: range overflow");
938
939 loop {
940 // Generate a value in the range [1, 2)
941 let value1_2 =
942 (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);
943
944 // Get a value in the range [0, 1) in order to avoid
945 // overflowing into infinity when multiplying with scale
946 let value0_1 = value1_2 - 1.0;
947
948 // Doing multiply before addition allows some architectures
949 // to use a single instruction.
950 let res = value0_1 * scale + low;
951
952 debug_assert!(low.all_le(res) || !scale.all_finite());
953 if res.all_lt(high) {
954 return res;
955 }
956
957 // This handles a number of edge cases.
958 // * `low` or `high` is NaN. In this case `scale` and
959 // `res` are going to end up as NaN.
960 // * `low` is negative infinity and `high` is finite.
961 // `scale` is going to be infinite and `res` will be
962 // NaN.
963 // * `high` is positive infinity and `low` is finite.
964 // `scale` is going to be infinite and `res` will
965 // be infinite or NaN (if value0_1 is 0).
966 // * `low` is negative infinity and `high` is positive
967 // infinity. `scale` will be infinite and `res` will
968 // be NaN.
969 // * `low` and `high` are finite, but `high - low`
970 // overflows to infinite. `scale` will be infinite
971 // and `res` will be infinite or NaN (if value0_1 is 0).
972 // So if `high` or `low` are non-finite, we are guaranteed
973 // to fail the `res < high` check above and end up here.
974 //
975 // While we technically should check for non-finite `low`
976 // and `high` before entering the loop, by doing the checks
977 // here instead, we allow the common case to avoid these
978 // checks. But we are still guaranteed that if `low` or
979 // `high` are non-finite we'll end up here and can do the
980 // appropriate checks.
981 //
982 // Likewise `high - low` overflowing to infinity is also
983 // rare, so handle it here after the common case.
984 let mask = !scale.finite_mask();
985 if mask.any() {
986 assert!(
987 low.all_finite() && high.all_finite(),
988 "Uniform::sample_single: low and high must be finite"
989 );
990 scale = scale.decrease_masked(mask);
991 }
992 }
993 }
994 }
995 };
996}
997
998uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
999uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }
1000
1001#[cfg(feature = "simd_support")]
1002uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
1003#[cfg(feature = "simd_support")]
1004uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
1005#[cfg(feature = "simd_support")]
1006uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
1007#[cfg(feature = "simd_support")]
1008uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }
1009
1010#[cfg(feature = "simd_support")]
1011uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
1012#[cfg(feature = "simd_support")]
1013uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
1014#[cfg(feature = "simd_support")]
1015uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
1016
1017
1018/// The back-end implementing [`UniformSampler`] for `Duration`.
1019///
1020/// Unless you are implementing [`UniformSampler`] for your own types, this type
1021/// should not be used directly, use [`Uniform`] instead.
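///
/// # Example
///
/// `Duration` ranges may be sampled via [`Uniform`] like any other supported
/// type:
///
/// ```
/// use core::time::Duration;
/// use rand::distributions::{Distribution, Uniform};
///
/// let between = Uniform::new(Duration::from_secs(1), Duration::from_secs(10));
/// let mut rng = rand::thread_rng();
/// let d = between.sample(&mut rng);
/// assert!(Duration::from_secs(1) <= d && d < Duration::from_secs(10));
/// ```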
1022#[derive(Clone, Copy, Debug)]
1023#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
1024pub struct UniformDuration {
1025 mode: UniformDurationMode,
1026 offset: u32,
1027}
1028
1029#[derive(Debug, Copy, Clone)]
1030#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
1031enum UniformDurationMode {
1032 Small {
1033 secs: u64,
1034 nanos: Uniform<u32>,
1035 },
1036 Medium {
1037 nanos: Uniform<u64>,
1038 },
1039 Large {
1040 max_secs: u64,
1041 max_nanos: u32,
1042 secs: Uniform<u64>,
1043 },
1044}
1045
1046impl SampleUniform for Duration {
1047 type Sampler = UniformDuration;
1048}
1049
1050impl UniformSampler for UniformDuration {
1051 type X = Duration;
1052
1053 #[inline]
1054 fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
1055 where
1056 B1: SampleBorrow<Self::X> + Sized,
1057 B2: SampleBorrow<Self::X> + Sized,
1058 {
1059 let low = *low_b.borrow();
1060 let high = *high_b.borrow();
1061 assert!(low < high, "Uniform::new called with `low >= high`");
1062 UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
1063 }
1064
1065 #[inline]
1066 fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
1067 where
1068 B1: SampleBorrow<Self::X> + Sized,
1069 B2: SampleBorrow<Self::X> + Sized,
1070 {
1071 let low = *low_b.borrow();
1072 let high = *high_b.borrow();
1073 assert!(
1074 low <= high,
1075 "Uniform::new_inclusive called with `low > high`"
1076 );
1077
1078 let low_s = low.as_secs();
1079 let low_n = low.subsec_nanos();
1080 let mut high_s = high.as_secs();
1081 let mut high_n = high.subsec_nanos();
1082
1083 if high_n < low_n {
1084 high_s -= 1;
1085 high_n += 1_000_000_000;
1086 }
1087
1088 let mode = if low_s == high_s {
1089 UniformDurationMode::Small {
1090 secs: low_s,
1091 nanos: Uniform::new_inclusive(low_n, high_n),
1092 }
1093 } else {
1094 let max = high_s
1095 .checked_mul(1_000_000_000)
1096 .and_then(|n| n.checked_add(u64::from(high_n)));
1097
1098 if let Some(higher_bound) = max {
1099 let lower_bound = low_s * 1_000_000_000 + u64::from(low_n);
1100 UniformDurationMode::Medium {
1101 nanos: Uniform::new_inclusive(lower_bound, higher_bound),
1102 }
1103 } else {
1104 // An offset is applied to simplify generation of nanoseconds
1105 let max_nanos = high_n - low_n;
1106 UniformDurationMode::Large {
1107 max_secs: high_s,
1108 max_nanos,
1109 secs: Uniform::new_inclusive(low_s, high_s),
1110 }
1111 }
1112 };
1113 UniformDuration {
1114 mode,
1115 offset: low_n,
1116 }
1117 }
1118
1119 #[inline]
1120 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
1121 match self.mode {
1122 UniformDurationMode::Small { secs, nanos } => {
1123 let n = nanos.sample(rng);
1124 Duration::new(secs, n)
1125 }
1126 UniformDurationMode::Medium { nanos } => {
1127 let nanos = nanos.sample(rng);
1128 Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
1129 }
1130 UniformDurationMode::Large {
1131 max_secs,
1132 max_nanos,
1133 secs,
1134 } => {
1135 // constant folding means this is at least as fast as `Rng::sample(Range)`
1136 let nano_range = Uniform::new(0, 1_000_000_000);
1137 loop {
1138 let s = secs.sample(rng);
1139 let n = nano_range.sample(rng);
1140 if !(s == max_secs && n > max_nanos) {
1141 let sum = n + self.offset;
1142 break Duration::new(s, sum);
1143 }
1144 }
1145 }
1146 }
1147 }
1148}
1149
1150#[cfg(test)]
1151mod tests {
1152 use super::*;
1153 use crate::rngs::mock::StepRng;
1154
1155 #[test]
1156 #[cfg(feature = "serde1")]
1157 fn test_serialization_uniform_duration() {
1158 let distr = UniformDuration::new(Duration::from_secs(10), Duration::from_secs(60));
1159 let de_distr: UniformDuration = bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap();
1160 assert_eq!(
1161 distr.offset, de_distr.offset
1162 );
1163 match (distr.mode, de_distr.mode) {
1164 (UniformDurationMode::Small {secs: a_secs, nanos: a_nanos}, UniformDurationMode::Small {secs, nanos}) => {
1165 assert_eq!(a_secs, secs);
1166
1167 assert_eq!(a_nanos.0.low, nanos.0.low);
1168 assert_eq!(a_nanos.0.range, nanos.0.range);
1169 assert_eq!(a_nanos.0.z, nanos.0.z);
1170 }
1171 (UniformDurationMode::Medium {nanos: a_nanos} , UniformDurationMode::Medium {nanos}) => {
1172 assert_eq!(a_nanos.0.low, nanos.0.low);
1173 assert_eq!(a_nanos.0.range, nanos.0.range);
1174 assert_eq!(a_nanos.0.z, nanos.0.z);
1175 }
1176 (UniformDurationMode::Large {max_secs:a_max_secs, max_nanos:a_max_nanos, secs:a_secs}, UniformDurationMode::Large {max_secs, max_nanos, secs} ) => {
1177 assert_eq!(a_max_secs, max_secs);
1178 assert_eq!(a_max_nanos, max_nanos);
1179
1180 assert_eq!(a_secs.0.low, secs.0.low);
1181 assert_eq!(a_secs.0.range, secs.0.range);
1182 assert_eq!(a_secs.0.z, secs.0.z);
1183 }
1184 _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
1185 }
1186 }
1187
1188 #[test]
1189 #[cfg(feature = "serde1")]
1190 fn test_uniform_serialization() {
1191 let unit_box: Uniform<i32> = Uniform::new(-1, 1);
1192 let de_unit_box: Uniform<i32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
1193
1194 assert_eq!(unit_box.0.low, de_unit_box.0.low);
1195 assert_eq!(unit_box.0.range, de_unit_box.0.range);
1196 assert_eq!(unit_box.0.z, de_unit_box.0.z);
1197
1198 let unit_box: Uniform<f32> = Uniform::new(-1., 1.);
1199 let de_unit_box: Uniform<f32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
1200
1201 assert_eq!(unit_box.0.low, de_unit_box.0.low);
1202 assert_eq!(unit_box.0.scale, de_unit_box.0.scale);
1203 }
1204
1205 #[should_panic]
1206 #[test]
1207 fn test_uniform_bad_limits_equal_int() {
1208 Uniform::new(10, 10);
1209 }
1210
1211 #[test]
1212 fn test_uniform_good_limits_equal_int() {
1213 let mut rng = crate::test::rng(804);
1214 let dist = Uniform::new_inclusive(10, 10);
1215 for _ in 0..20 {
1216 assert_eq!(rng.sample(dist), 10);
1217 }
1218 }
1219
1220 #[should_panic]
1221 #[test]
1222 fn test_uniform_bad_limits_flipped_int() {
1223 Uniform::new(10, 5);
1224 }
1225
1226 #[test]
1227 #[cfg_attr(miri, ignore)] // Miri is too slow
1228 fn test_integers() {
1229 use core::{i128, u128};
1230 use core::{i16, i32, i64, i8, isize};
1231 use core::{u16, u32, u64, u8, usize};
1232
1233 let mut rng = crate::test::rng(251);
1234 macro_rules! t {
1235 ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
1236 for &(low, high) in $v.iter() {
1237 let my_uniform = Uniform::new(low, high);
1238 for _ in 0..1000 {
1239 let v: $ty = rng.sample(my_uniform);
1240 assert!($le(low, v) && $lt(v, high));
1241 }
1242
1243 let my_uniform = Uniform::new_inclusive(low, high);
1244 for _ in 0..1000 {
1245 let v: $ty = rng.sample(my_uniform);
1246 assert!($le(low, v) && $le(v, high));
1247 }
1248
1249 let my_uniform = Uniform::new(&low, high);
1250 for _ in 0..1000 {
1251 let v: $ty = rng.sample(my_uniform);
1252 assert!($le(low, v) && $lt(v, high));
1253 }
1254
1255 let my_uniform = Uniform::new_inclusive(&low, &high);
1256 for _ in 0..1000 {
1257 let v: $ty = rng.sample(my_uniform);
1258 assert!($le(low, v) && $le(v, high));
1259 }
1260
1261 for _ in 0..1000 {
1262 let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng);
1263 assert!($le(low, v) && $lt(v, high));
1264 }
1265
1266 for _ in 0..1000 {
1267 let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng);
1268 assert!($le(low, v) && $le(v, high));
1269 }
1270 }
1271 }};
1272
1273 // scalar bulk
1274 ($($ty:ident),*) => {{
1275 $(t!(
1276 $ty,
1277 [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
1278 |x, y| x <= y,
1279 |x, y| x < y
1280 );)*
1281 }};
1282
1283 // simd bulk
1284 ($($ty:ident),* => $scalar:ident) => {{
1285 $(t!(
1286 $ty,
1287 [
1288 ($ty::splat(0), $ty::splat(10)),
1289 ($ty::splat(10), $ty::splat(127)),
1290 ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
1291 ],
1292 |x: $ty, y| x.le(y).all(),
1293 |x: $ty, y| x.lt(y).all()
1294 );)*
1295 }};
1296 }
1297 t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize, i128, u128);
1298
1299 #[cfg(feature = "simd_support")]
1300 {
1301 t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
1302 t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
1303 t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
1304 t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
1305 t!(u32x2, u32x4, u32x8, u32x16 => u32);
1306 t!(i32x2, i32x4, i32x8, i32x16 => i32);
1307 t!(u64x2, u64x4, u64x8 => u64);
1308 t!(i64x2, i64x4, i64x8 => i64);
1309 }
1310 }
1311
1312 #[test]
1313 #[cfg_attr(miri, ignore)] // Miri is too slow
1314 fn test_char() {
1315 let mut rng = crate::test::rng(891);
1316 let mut max = core::char::from_u32(0).unwrap();
1317 for _ in 0..100 {
1318 let c = rng.gen_range('A'..='Z');
1319 assert!(('A'..='Z').contains(&c));
1320 max = max.max(c);
1321 }
1322 assert_eq!(max, 'Z');
1323 let d = Uniform::new(
1324 core::char::from_u32(0xD7F0).unwrap(),
1325 core::char::from_u32(0xE010).unwrap(),
1326 );
1327 for _ in 0..100 {
1328 let c = d.sample(&mut rng);
1329 assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF);
1330 }
1331 }
1332
1333 #[test]
1334 #[cfg_attr(miri, ignore)] // Miri is too slow
1335 fn test_floats() {
1336 let mut rng = crate::test::rng(252);
1337 let mut zero_rng = StepRng::new(0, 0);
1338 let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
1339 macro_rules! t {
1340 ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
1341 let v: &[($f_scalar, $f_scalar)] = &[
1342 (0.0, 100.0),
1343 (-1e35, -1e25),
1344 (1e-35, 1e-25),
1345 (-1e35, 1e35),
1346 (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
1347 (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
1348 (-<$f_scalar>::from_bits(5), 0.0),
1349 (-<$f_scalar>::from_bits(7), -0.0),
1350 (0.1 * ::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
1351 (-::core::$f_scalar::MAX * 0.2, ::core::$f_scalar::MAX * 0.7),
1352 ];
1353 for &(low_scalar, high_scalar) in v.iter() {
1354 for lane in 0..<$ty>::lanes() {
1355 let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
1356 let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
1357 let my_uniform = Uniform::new(low, high);
1358 let my_incl_uniform = Uniform::new_inclusive(low, high);
1359 for _ in 0..100 {
1360 let v = rng.sample(my_uniform).extract(lane);
1361 assert!(low_scalar <= v && v < high_scalar);
1362 let v = rng.sample(my_incl_uniform).extract(lane);
1363 assert!(low_scalar <= v && v <= high_scalar);
1364 let v = <$ty as SampleUniform>::Sampler
1365 ::sample_single(low, high, &mut rng).extract(lane);
1366 assert!(low_scalar <= v && v < high_scalar);
1367 }
1368
1369 assert_eq!(
1370 rng.sample(Uniform::new_inclusive(low, low)).extract(lane),
1371 low_scalar
1372 );
1373
1374 assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
1375 assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
1376 assert_eq!(<$ty as SampleUniform>::Sampler
1377 ::sample_single(low, high, &mut zero_rng)
1378 .extract(lane), low_scalar);
1379 assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
1380 assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);
1381
1382 // Don't run this test for really tiny differences between high and low
1383 // since for those rounding might result in selecting high for a very
1384 // long time.
1385 if (high_scalar - low_scalar) > 0.0001 {
1386 let mut lowering_max_rng = StepRng::new(
1387 0xffff_ffff_ffff_ffff,
1388 (-1i64 << $bits_shifted) as u64,
1389 );
1390 assert!(
1391 <$ty as SampleUniform>::Sampler
1392 ::sample_single(low, high, &mut lowering_max_rng)
1393 .extract(lane) < high_scalar
1394 );
1395 }
1396 }
1397 }
1398
1399 assert_eq!(
1400 rng.sample(Uniform::new_inclusive(
1401 ::core::$f_scalar::MAX,
1402 ::core::$f_scalar::MAX
1403 )),
1404 ::core::$f_scalar::MAX
1405 );
1406 assert_eq!(
1407 rng.sample(Uniform::new_inclusive(
1408 -::core::$f_scalar::MAX,
1409 -::core::$f_scalar::MAX
1410 )),
1411 -::core::$f_scalar::MAX
1412 );
1413 }};
1414 }
1415
1416 t!(f32, f32, 32 - 23);
1417 t!(f64, f64, 64 - 52);
1418 #[cfg(feature = "simd_support")]
1419 {
1420 t!(f32x2, f32, 32 - 23);
1421 t!(f32x4, f32, 32 - 23);
1422 t!(f32x8, f32, 32 - 23);
1423 t!(f32x16, f32, 32 - 23);
1424 t!(f64x2, f64, 64 - 52);
1425 t!(f64x4, f64, 64 - 52);
1426 t!(f64x8, f64, 64 - 52);
1427 }
1428 }
1429
1430 #[test]
1431 #[should_panic]
1432 fn test_float_overflow() {
1433 let _ = Uniform::from(::core::f64::MIN..::core::f64::MAX);
1434 }
1435
1436 #[test]
1437 #[should_panic]
1438 fn test_float_overflow_single() {
1439 let mut rng = crate::test::rng(252);
1440 rng.gen_range(::core::f64::MIN..::core::f64::MAX);
1441 }
1442
1443 #[test]
1444 #[cfg(all(
1445 feature = "std",
1446 not(target_arch = "wasm32"),
1447 not(target_arch = "asmjs")
1448 ))]
1449 fn test_float_assertions() {
1450 use super::SampleUniform;
1451 use std::panic::catch_unwind;
1452 fn range<T: SampleUniform>(low: T, high: T) {
1453 let mut rng = crate::test::rng(253);
1454 T::Sampler::sample_single(low, high, &mut rng);
1455 }
1456
1457 macro_rules! t {
1458 ($ty:ident, $f_scalar:ident) => {{
1459 let v: &[($f_scalar, $f_scalar)] = &[
1460 (::std::$f_scalar::NAN, 0.0),
1461 (1.0, ::std::$f_scalar::NAN),
1462 (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
1463 (1.0, 0.5),
1464 (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
1465 (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
1466 (
1467 ::std::$f_scalar::NEG_INFINITY,
1468 ::std::$f_scalar::NEG_INFINITY,
1469 ),
1470 (::std::$f_scalar::NEG_INFINITY, 5.0),
1471 (5.0, ::std::$f_scalar::INFINITY),
1472 (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
1473 (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
1474 (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
1475 ];
1476 for &(low_scalar, high_scalar) in v.iter() {
1477 for lane in 0..<$ty>::lanes() {
1478 let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
1479 let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
1480 assert!(catch_unwind(|| range(low, high)).is_err());
1481 assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
1482 assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
1483 assert!(catch_unwind(|| range(low, low)).is_err());
1484 assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
1485 }
1486 }
1487 }};
1488 }
1489
1490 t!(f32, f32);
1491 t!(f64, f64);
1492 #[cfg(feature = "simd_support")]
1493 {
1494 t!(f32x2, f32);
1495 t!(f32x4, f32);
1496 t!(f32x8, f32);
1497 t!(f32x16, f32);
1498 t!(f64x2, f64);
1499 t!(f64x4, f64);
1500 t!(f64x8, f64);
1501 }
1502 }
1503
1504
1505 #[test]
1506 #[cfg_attr(miri, ignore)] // Miri is too slow
1507 fn test_durations() {
1508 let mut rng = crate::test::rng(253);
1509
1510 let v = &[
1511 (Duration::new(10, 50000), Duration::new(100, 1234)),
1512 (Duration::new(0, 100), Duration::new(1, 50)),
1513 (
1514 Duration::new(0, 0),
1515 Duration::new(u64::max_value(), 999_999_999),
1516 ),
1517 ];
1518 for &(low, high) in v.iter() {
1519 let my_uniform = Uniform::new(low, high);
1520 for _ in 0..1000 {
1521 let v = rng.sample(my_uniform);
1522 assert!(low <= v && v < high);
1523 }
1524 }
1525 }
1526
1527 #[test]
1528 fn test_custom_uniform() {
1529 use crate::distributions::uniform::{
1530 SampleBorrow, SampleUniform, UniformFloat, UniformSampler,
1531 };
1532 #[derive(Clone, Copy, PartialEq, PartialOrd)]
1533 struct MyF32 {
1534 x: f32,
1535 }
1536 #[derive(Clone, Copy, Debug)]
1537 struct UniformMyF32(UniformFloat<f32>);
1538 impl UniformSampler for UniformMyF32 {
1539 type X = MyF32;
1540
1541 fn new<B1, B2>(low: B1, high: B2) -> Self
1542 where
1543 B1: SampleBorrow<Self::X> + Sized,
1544 B2: SampleBorrow<Self::X> + Sized,
1545 {
1546 UniformMyF32(UniformFloat::<f32>::new(low.borrow().x, high.borrow().x))
1547 }
1548
1549 fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
1550 where
1551 B1: SampleBorrow<Self::X> + Sized,
1552 B2: SampleBorrow<Self::X> + Sized,
1553 {
1554 UniformSampler::new(low, high)
1555 }
1556
1557 fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
1558 MyF32 {
1559 x: self.0.sample(rng),
1560 }
1561 }
1562 }
1563 impl SampleUniform for MyF32 {
1564 type Sampler = UniformMyF32;
1565 }
1566
1567 let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 });
1568 let uniform = Uniform::new(low, high);
1569 let mut rng = crate::test::rng(804);
1570 for _ in 0..100 {
1571 let x: MyF32 = rng.sample(uniform);
1572 assert!(low <= x && x < high);
1573 }
1574 }
1575
1576 #[test]
1577 fn test_uniform_from_std_range() {
1578 let r = Uniform::from(2u32..7);
1579 assert_eq!(r.0.low, 2);
1580 assert_eq!(r.0.range, 5);
1581 let r = Uniform::from(2.0f64..7.0);
1582 assert_eq!(r.0.low, 2.0);
1583 assert_eq!(r.0.scale, 5.0);
1584 }
1585
1586 #[test]
1587 fn test_uniform_from_std_range_inclusive() {
1588 let r = Uniform::from(2u32..=6);
1589 assert_eq!(r.0.low, 2);
1590 assert_eq!(r.0.range, 5);
1591 let r = Uniform::from(2.0f64..=7.0);
1592 assert_eq!(r.0.low, 2.0);
1593 assert!(r.0.scale > 5.0);
1594 assert!(r.0.scale < 5.0 + 1e-14);
1595 }
1596
1597 #[test]
1598 fn value_stability() {
1599 fn test_samples<T: SampleUniform + Copy + core::fmt::Debug + PartialEq>(
1600 lb: T, ub: T, expected_single: &[T], expected_multiple: &[T],
1601 ) where Uniform<T>: Distribution<T> {
1602 let mut rng = crate::test::rng(897);
1603 let mut buf = [lb; 3];
1604
1605 for x in &mut buf {
1606 *x = T::Sampler::sample_single(lb, ub, &mut rng);
1607 }
1608 assert_eq!(&buf, expected_single);
1609
1610 let distr = Uniform::new(lb, ub);
1611 for x in &mut buf {
1612 *x = rng.sample(&distr);
1613 }
1614 assert_eq!(&buf, expected_multiple);
1615 }
1616
        // We test on a subset of types; possibly we should do more.
1618 // TODO: SIMD types
1619
1620 test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]);
1621 test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]);
1622
1623 test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[
1624 0.008194133,
1625 0.00398172,
1626 0.007428536,
1627 ]);
1628 test_samples(
1629 -1e10f64,
1630 1e10f64,
1631 &[-4673848682.871551, 6388267422.932352, 4857075081.198343],
1632 &[1173375212.1808167, 1917642852.109581, 2365076174.3153973],
1633 );
1634
1635 test_samples(
1636 Duration::new(2, 0),
1637 Duration::new(4, 0),
1638 &[
1639 Duration::new(2, 532615131),
1640 Duration::new(3, 638826742),
1641 Duration::new(3, 485707508),
1642 ],
1643 &[
1644 Duration::new(3, 117337521),
1645 Duration::new(3, 191764285),
1646 Duration::new(3, 236507617),
1647 ],
1648 );
1649 }
1650
1651 #[test]
1652 fn uniform_distributions_can_be_compared() {
1653 assert_eq!(Uniform::new(1.0, 2.0), Uniform::new(1.0, 2.0));
1654
1655 // To cover UniformInt
1656 assert_eq!(Uniform::new(1 as u32, 2 as u32), Uniform::new(1 as u32, 2 as u32));
1657 }
1658}
1659