1 | /*! |
2 | A collection of bounded numeric types. |
3 | |
4 | Includes: |
5 | |
6 | - [`FiniteF32`] |
7 | - [`FiniteF64`] |
8 | - [`NonZeroPositiveF32`] |
9 | - [`NonZeroPositiveF64`] |
10 | - [`PositiveF32`] |
11 | - [`PositiveF64`] |
12 | - [`NormalizedF32`] |
13 | - [`NormalizedF64`] |
14 | |
Unlike `f32`/`f64`, all of these float types implement `Ord`, `PartialOrd` and `Hash`,
which is possible because every stored value is guaranteed to be finite.
17 | */ |
18 | |
19 | #![no_std ] |
20 | #![deny (missing_docs)] |
21 | #![deny (missing_copy_implementations)] |
22 | #![deny (missing_debug_implementations)] |
23 | |
// Implements `core::fmt::Display` for a bounded-number newtype `$t` by
// printing the primitive value returned by `$t::get()`.
macro_rules! impl_display {
    ($t:ident) => {
        impl core::fmt::Display for $t {
            #[inline]
            fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
                write!(f, "{}", self.get())
            }
        }
    };
}
34 | |
35 | #[cfg (feature = "approx-eq" )] |
36 | pub use float_cmp::{ApproxEq, ApproxEqUlps, Ulps}; |
37 | |
// Implements `float_cmp::ApproxEq` (margin-based comparison) and
// `float_cmp::ApproxEqUlps` (ULPs-based comparison) for an `f32`-backed
// newtype `$t`, delegating to the wrapped value stored in field `.0`.
#[cfg(feature = "approx-eq")]
macro_rules! impl_approx_32 {
    ($t:ident) => {
        impl float_cmp::ApproxEq for $t {
            type Margin = float_cmp::F32Margin;

            #[inline]
            fn approx_eq<M: Into<Self::Margin>>(self, other: Self, margin: M) -> bool {
                self.0.approx_eq(other.0, margin)
            }
        }

        impl float_cmp::ApproxEqUlps for $t {
            type Flt = f32;

            #[inline]
            fn approx_eq_ulps(&self, other: &Self, ulps: i32) -> bool {
                self.0.approx_eq_ulps(&other.0, ulps)
            }
        }
    };
}
60 | |
// Fallback: without the "approx-eq" feature the macro expands to nothing.
#[cfg(not(feature = "approx-eq"))]
macro_rules! impl_approx_32 {
    ($t:ident) => {};
}
65 | |
// Implements `float_cmp::ApproxEq` (margin-based comparison) and
// `float_cmp::ApproxEqUlps` (ULPs-based comparison) for an `f64`-backed
// newtype `$t`, delegating to the wrapped value stored in field `.0`.
//
// The macro definition itself is gated on the "approx-eq" feature, so
// per-impl `#[cfg]` attributes in the expansion are unnecessary; they were
// removed to match `impl_approx_32`.
#[cfg(feature = "approx-eq")]
macro_rules! impl_approx_64 {
    ($t:ident) => {
        impl float_cmp::ApproxEq for $t {
            type Margin = float_cmp::F64Margin;

            #[inline]
            fn approx_eq<M: Into<Self::Margin>>(self, other: Self, margin: M) -> bool {
                self.0.approx_eq(other.0, margin)
            }
        }

        impl float_cmp::ApproxEqUlps for $t {
            type Flt = f64;

            #[inline]
            fn approx_eq_ulps(&self, other: &Self, ulps: i64) -> bool {
                self.0.approx_eq_ulps(&other.0, ulps)
            }
        }
    };
}
90 | |
// Fallback: without the "approx-eq" feature the macro expands to nothing.
#[cfg(not(feature = "approx-eq"))]
macro_rules! impl_approx_64 {
    ($t:ident) => {};
}
95 | |
/// An immutable, finite `f32`.
///
/// Unlike `f32`, implements `Ord`, `PartialOrd` and `Hash`.
#[derive(Copy, Clone, Default, Debug)]
#[repr(transparent)]
pub struct FiniteF32(f32);

impl FiniteF32 {
    /// Creates a finite `f32`.
    ///
    /// Returns `None` for NaN and infinity.
    #[inline]
    pub fn new(n: f32) -> Option<Self> {
        // Guard clause: NaN and ±infinity are the only rejected values.
        if !n.is_finite() {
            return None;
        }

        Some(FiniteF32(n))
    }

    /// Creates a finite `f32` without checking the value.
    ///
    /// # Safety
    ///
    /// `n` must be finite.
    #[inline]
    pub const unsafe fn new_unchecked(n: f32) -> Self {
        FiniteF32(n)
    }

    /// Returns the value as a primitive type.
    #[inline]
    pub const fn get(&self) -> f32 {
        self.0
    }
}

impl Eq for FiniteF32 {}

impl PartialEq for FiniteF32 {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}

impl Ord for FiniteF32 {
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        // Finite values always have a total order. `partial_cmp` can only
        // return `None` when a NaN slipped in via `new_unchecked` misuse;
        // in that case we report `Equal`, matching the previous behavior.
        match self.0.partial_cmp(&other.0) {
            Some(ordering) => ordering,
            None => core::cmp::Ordering::Equal,
        }
    }
}

impl PartialOrd for FiniteF32 {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl core::hash::Hash for FiniteF32 {
    #[inline]
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        // `f32` itself is not `Hash`, so feed the raw bit pattern instead.
        state.write_u32(self.0.to_bits());
    }
}

impl PartialEq<f32> for FiniteF32 {
    #[inline]
    fn eq(&self, other: &f32) -> bool {
        self.0 == *other
    }
}
175 | |
impl_display!(FiniteF32); // `Display` prints the inner `f32` value.
impl_approx_32!(FiniteF32); // `float_cmp` impls; no-op without the "approx-eq" feature.
178 | |
/// An immutable, finite `f64`.
///
/// Unlike `f64`, implements `Ord`, `PartialOrd` and `Hash`.
#[derive(Copy, Clone, Default, Debug)]
#[repr(transparent)]
pub struct FiniteF64(f64);

impl FiniteF64 {
    /// Creates a finite `f64`.
    ///
    /// Returns `None` for NaN and infinity.
    #[inline]
    pub fn new(n: f64) -> Option<Self> {
        // Guard clause: NaN and ±infinity are the only rejected values.
        if !n.is_finite() {
            return None;
        }

        Some(FiniteF64(n))
    }

    /// Creates a finite `f64` without checking the value.
    ///
    /// # Safety
    ///
    /// `n` must be finite.
    #[inline]
    pub const unsafe fn new_unchecked(n: f64) -> Self {
        FiniteF64(n)
    }

    /// Returns the value as a primitive type.
    #[inline]
    pub const fn get(&self) -> f64 {
        self.0
    }
}

impl Eq for FiniteF64 {}

impl PartialEq for FiniteF64 {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}

impl Ord for FiniteF64 {
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        // Finite values always have a total order. `partial_cmp` can only
        // return `None` when a NaN slipped in via `new_unchecked` misuse;
        // in that case we report `Equal`, matching the previous behavior.
        match self.0.partial_cmp(&other.0) {
            Some(ordering) => ordering,
            None => core::cmp::Ordering::Equal,
        }
    }
}

impl PartialOrd for FiniteF64 {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl core::hash::Hash for FiniteF64 {
    #[inline]
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        // `f64` itself is not `Hash`, so feed the raw bit pattern instead.
        state.write_u64(self.0.to_bits());
    }
}

impl PartialEq<f64> for FiniteF64 {
    #[inline]
    fn eq(&self, other: &f64) -> bool {
        self.0 == *other
    }
}
258 | |
impl_display!(FiniteF64); // `Display` prints the inner `f64` value.
impl_approx_64!(FiniteF64); // `float_cmp` impls; no-op without the "approx-eq" feature.
261 | |
262 | /// An immutable, finite `f32` that is known to be >= 0. |
263 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default, Debug)] |
264 | #[repr (transparent)] |
265 | pub struct PositiveF32(FiniteF32); |
266 | |
267 | impl PositiveF32 { |
268 | /// A `PositiveF32` value initialized with zero. |
269 | pub const ZERO: Self = PositiveF32(FiniteF32(0.0)); |
270 | |
271 | /// Creates a new `PositiveF32` if the given value is >= 0. |
272 | /// |
273 | /// Returns `None` for negative, NaN and infinity. |
274 | #[inline ] |
275 | pub fn new(n: f32) -> Option<Self> { |
276 | if n.is_finite() && n >= 0.0 { |
277 | Some(PositiveF32(FiniteF32(n))) |
278 | } else { |
279 | None |
280 | } |
281 | } |
282 | |
283 | /// Creates a new `PositiveF32` without checking the value. |
284 | /// |
285 | /// # Safety |
286 | /// |
287 | /// `n` must be finite and >= 0. |
288 | #[inline ] |
289 | pub const unsafe fn new_unchecked(n: f32) -> Self { |
290 | PositiveF32(FiniteF32(n)) |
291 | } |
292 | |
293 | /// Returns the value as a primitive type. |
294 | #[inline ] |
295 | pub const fn get(&self) -> f32 { |
296 | self.0.get() |
297 | } |
298 | |
299 | /// Returns the value as a `FiniteF32`. |
300 | #[inline ] |
301 | pub const fn get_finite(&self) -> FiniteF32 { |
302 | self.0 |
303 | } |
304 | } |
305 | |
306 | impl PartialEq<f32> for PositiveF32 { |
307 | #[inline ] |
308 | fn eq(&self, other: &f32) -> bool { |
309 | self.get() == *other |
310 | } |
311 | } |
312 | |
313 | impl_display!(PositiveF32); |
314 | impl_approx_32!(PositiveF32); |
315 | |
316 | /// An immutable, finite `f64` that is known to be >= 0. |
317 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default, Debug)] |
318 | #[repr (transparent)] |
319 | pub struct PositiveF64(FiniteF64); |
320 | |
321 | impl PositiveF64 { |
322 | /// A `PositiveF64` value initialized with zero. |
323 | pub const ZERO: Self = PositiveF64(FiniteF64(0.0)); |
324 | |
325 | /// Creates a new `PositiveF64` if the given value is >= 0. |
326 | /// |
327 | /// Returns `None` for negative, NaN and infinity. |
328 | #[inline ] |
329 | pub fn new(n: f64) -> Option<Self> { |
330 | if n.is_finite() && n >= 0.0 { |
331 | Some(PositiveF64(FiniteF64(n))) |
332 | } else { |
333 | None |
334 | } |
335 | } |
336 | |
337 | /// Creates a new `PositiveF64` without checking the value. |
338 | /// |
339 | /// # Safety |
340 | /// |
341 | /// `n` must be finite and >= 0. |
342 | #[inline ] |
343 | pub const unsafe fn new_unchecked(n: f64) -> Self { |
344 | PositiveF64(FiniteF64(n)) |
345 | } |
346 | |
347 | /// Returns the value as a primitive type. |
348 | #[inline ] |
349 | pub const fn get(&self) -> f64 { |
350 | self.0.get() |
351 | } |
352 | |
353 | /// Returns the value as a `FiniteF64`. |
354 | #[inline ] |
355 | pub const fn get_finite(&self) -> FiniteF64 { |
356 | self.0 |
357 | } |
358 | } |
359 | |
360 | impl PartialEq<f64> for PositiveF64 { |
361 | #[inline ] |
362 | fn eq(&self, other: &f64) -> bool { |
363 | self.get() == *other |
364 | } |
365 | } |
366 | |
367 | impl_display!(PositiveF64); |
368 | impl_approx_64!(PositiveF64); |
369 | |
370 | /// An immutable, finite `f32` that is known to be > 0. |
371 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] |
372 | #[repr (transparent)] |
373 | pub struct NonZeroPositiveF32(FiniteF32); |
374 | |
375 | impl NonZeroPositiveF32 { |
376 | /// Creates a new `NonZeroPositiveF32` if the given value is > 0. |
377 | /// |
378 | /// Returns `None` for negative, zero, NaN and infinity. |
379 | #[inline ] |
380 | pub fn new(n: f32) -> Option<Self> { |
381 | if n.is_finite() && n > 0.0 { |
382 | Some(NonZeroPositiveF32(FiniteF32(n))) |
383 | } else { |
384 | None |
385 | } |
386 | } |
387 | |
388 | /// Creates a new `NonZeroPositiveF32` without checking the value. |
389 | /// |
390 | /// # Safety |
391 | /// |
392 | /// `n` must be finite and > 0. |
393 | #[inline ] |
394 | pub const unsafe fn new_unchecked(n: f32) -> Self { |
395 | NonZeroPositiveF32(FiniteF32(n)) |
396 | } |
397 | |
398 | /// Returns the value as a primitive type. |
399 | #[inline ] |
400 | pub const fn get(&self) -> f32 { |
401 | self.0.get() |
402 | } |
403 | |
404 | /// Returns the value as a `FiniteF32`. |
405 | #[inline ] |
406 | pub const fn get_finite(&self) -> FiniteF32 { |
407 | self.0 |
408 | } |
409 | } |
410 | |
411 | impl PartialEq<f32> for NonZeroPositiveF32 { |
412 | #[inline ] |
413 | fn eq(&self, other: &f32) -> bool { |
414 | self.get() == *other |
415 | } |
416 | } |
417 | |
418 | impl_display!(NonZeroPositiveF32); |
419 | impl_approx_32!(NonZeroPositiveF32); |
420 | |
421 | /// An immutable, finite `f64` that is known to be > 0. |
422 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] |
423 | #[repr (transparent)] |
424 | pub struct NonZeroPositiveF64(FiniteF64); |
425 | |
426 | impl NonZeroPositiveF64 { |
427 | /// Creates a new `NonZeroPositiveF64` if the given value is > 0. |
428 | /// |
429 | /// Returns `None` for negative, zero, NaN and infinity. |
430 | #[inline ] |
431 | pub fn new(n: f64) -> Option<Self> { |
432 | if n.is_finite() && n > 0.0 { |
433 | Some(NonZeroPositiveF64(FiniteF64(n))) |
434 | } else { |
435 | None |
436 | } |
437 | } |
438 | |
439 | /// Creates a new `NonZeroPositiveF64` without checking the value. |
440 | /// |
441 | /// # Safety |
442 | /// |
443 | /// `n` must be finite and > 0. |
444 | #[inline ] |
445 | pub const unsafe fn new_unchecked(n: f64) -> Self { |
446 | NonZeroPositiveF64(FiniteF64(n)) |
447 | } |
448 | |
449 | /// Returns the value as a primitive type. |
450 | #[inline ] |
451 | pub const fn get(&self) -> f64 { |
452 | self.0.get() |
453 | } |
454 | |
455 | /// Returns the value as a `FiniteF64`. |
456 | #[inline ] |
457 | pub const fn get_finite(&self) -> FiniteF64 { |
458 | self.0 |
459 | } |
460 | } |
461 | |
462 | impl PartialEq<f64> for NonZeroPositiveF64 { |
463 | #[inline ] |
464 | fn eq(&self, other: &f64) -> bool { |
465 | self.get() == *other |
466 | } |
467 | } |
468 | |
469 | impl_display!(NonZeroPositiveF64); |
470 | impl_approx_64!(NonZeroPositiveF64); |
471 | |
472 | /// An immutable, finite `f32` in a 0..=1 range. |
473 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] |
474 | #[repr (transparent)] |
475 | pub struct NormalizedF32(FiniteF32); |
476 | |
477 | impl NormalizedF32 { |
478 | /// A `NormalizedF32` value initialized with zero. |
479 | pub const ZERO: Self = NormalizedF32(FiniteF32(0.0)); |
480 | /// A `NormalizedF32` value initialized with one. |
481 | pub const ONE: Self = NormalizedF32(FiniteF32(1.0)); |
482 | |
483 | /// Creates a `NormalizedF32` if the given value is in a 0..=1 range. |
484 | #[inline ] |
485 | pub fn new(n: f32) -> Option<Self> { |
486 | if n.is_finite() && n >= 0.0 && n <= 1.0 { |
487 | Some(NormalizedF32(FiniteF32(n))) |
488 | } else { |
489 | None |
490 | } |
491 | } |
492 | |
493 | /// Creates a new `NormalizedF32` without checking the value. |
494 | /// |
495 | /// # Safety |
496 | /// |
497 | /// `n` must be in 0..=1 range. |
498 | #[inline ] |
499 | pub const unsafe fn new_unchecked(n: f32) -> Self { |
500 | NormalizedF32(FiniteF32(n)) |
501 | } |
502 | |
503 | /// Creates a `NormalizedF32` clamping the given value to a 0..=1 range. |
504 | /// |
505 | /// Returns zero in case of NaN or infinity. |
506 | #[inline ] |
507 | pub fn new_clamped(n: f32) -> Self { |
508 | if n.is_finite() { |
509 | NormalizedF32(FiniteF32(clamp_f32(0.0, n, 1.0))) |
510 | } else { |
511 | Self::ZERO |
512 | } |
513 | } |
514 | |
515 | /// Creates a `NormalizedF32` by dividing the given value by 255. |
516 | #[inline ] |
517 | pub fn new_u8(n: u8) -> Self { |
518 | NormalizedF32(FiniteF32(f32::from(n) / 255.0)) |
519 | } |
520 | |
521 | /// Creates a `NormalizedF64` by dividing the given value by 65535. |
522 | #[inline ] |
523 | pub fn new_u16(n: u16) -> Self { |
524 | NormalizedF32(FiniteF32(f32::from(n) / 65535.0)) |
525 | } |
526 | |
527 | /// Returns the value as a primitive type. |
528 | #[inline ] |
529 | pub const fn get(self) -> f32 { |
530 | self.0.get() |
531 | } |
532 | |
533 | /// Returns the value as a `FiniteF32`. |
534 | #[inline ] |
535 | pub const fn get_finite(&self) -> FiniteF32 { |
536 | self.0 |
537 | } |
538 | |
539 | /// Returns the value as a `u8`. |
540 | #[inline ] |
541 | pub fn to_u8(&self) -> u8 { |
542 | ((self.0).0 * 255.0 + 0.5) as u8 |
543 | } |
544 | |
545 | /// Returns the value as a `u16`. |
546 | #[inline ] |
547 | pub fn to_u16(&self) -> u16 { |
548 | ((self.0).0 * 65535.0 + 0.5) as u16 |
549 | } |
550 | } |
551 | |
552 | impl core::ops::Mul<NormalizedF32> for NormalizedF32 { |
553 | type Output = Self; |
554 | |
555 | #[inline ] |
556 | fn mul(self, rhs: Self) -> Self::Output { |
557 | Self::new_clamped((self.0).0 * (rhs.0).0) |
558 | } |
559 | } |
560 | |
561 | impl PartialEq<f32> for NormalizedF32 { |
562 | #[inline ] |
563 | fn eq(&self, other: &f32) -> bool { |
564 | self.get() == *other |
565 | } |
566 | } |
567 | |
568 | impl_display!(NormalizedF32); |
569 | impl_approx_32!(NormalizedF32); |
570 | |
571 | /// An immutable, finite `f64` in a 0..=1 range. |
572 | #[derive (Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] |
573 | #[repr (transparent)] |
574 | pub struct NormalizedF64(FiniteF64); |
575 | |
576 | impl NormalizedF64 { |
577 | /// A `NormalizedF64` value initialized with zero. |
578 | pub const ZERO: Self = NormalizedF64(FiniteF64(0.0)); |
579 | /// A `NormalizedF64` value initialized with one. |
580 | pub const ONE: Self = NormalizedF64(FiniteF64(1.0)); |
581 | |
582 | /// Creates a `NormalizedF64` if the given value is in a 0..=1 range. |
583 | #[inline ] |
584 | pub fn new(n: f64) -> Option<Self> { |
585 | if n >= 0.0 && n <= 1.0 { |
586 | Some(NormalizedF64(FiniteF64(n))) |
587 | } else { |
588 | None |
589 | } |
590 | } |
591 | |
592 | /// Creates a new `NormalizedF64` without checking the value. |
593 | /// |
594 | /// # Safety |
595 | /// |
596 | /// `n` must be in 0..=1 range. |
597 | #[inline ] |
598 | pub const unsafe fn new_unchecked(n: f64) -> Self { |
599 | NormalizedF64(FiniteF64(n)) |
600 | } |
601 | |
602 | /// Creates a `NormalizedF64` clamping the given value to a 0..=1 range. |
603 | /// |
604 | /// Returns zero in case of NaN or infinity. |
605 | #[inline ] |
606 | pub fn new_clamped(n: f64) -> Self { |
607 | if n.is_finite() { |
608 | NormalizedF64(FiniteF64(clamp_f64(0.0, n, 1.0))) |
609 | } else { |
610 | Self::ZERO |
611 | } |
612 | } |
613 | |
614 | /// Creates a `NormalizedF64` by dividing the given value by 255. |
615 | #[inline ] |
616 | pub fn new_u8(n: u8) -> Self { |
617 | NormalizedF64(FiniteF64(f64::from(n) / 255.0)) |
618 | } |
619 | |
620 | /// Creates a `NormalizedF64` by dividing the given value by 65535. |
621 | #[inline ] |
622 | pub fn new_u16(n: u16) -> Self { |
623 | NormalizedF64(FiniteF64(f64::from(n) / 65535.0)) |
624 | } |
625 | |
626 | /// Returns the value as a primitive type. |
627 | #[inline ] |
628 | pub const fn get(self) -> f64 { |
629 | self.0.get() |
630 | } |
631 | |
632 | /// Returns the value as a `FiniteF64`. |
633 | #[inline ] |
634 | pub const fn get_finite(&self) -> FiniteF64 { |
635 | self.0 |
636 | } |
637 | |
638 | /// Returns the value as a `u8`. |
639 | #[inline ] |
640 | pub fn to_u8(&self) -> u8 { |
641 | ((self.0).0 * 255.0 + 0.5) as u8 |
642 | } |
643 | |
644 | /// Returns the value as a `u16`. |
645 | #[inline ] |
646 | pub fn to_u16(&self) -> u16 { |
647 | ((self.0).0 * 65535.0 + 0.5) as u16 |
648 | } |
649 | } |
650 | |
651 | impl core::ops::Mul<NormalizedF64> for NormalizedF64 { |
652 | type Output = Self; |
653 | |
654 | #[inline ] |
655 | fn mul(self, rhs: Self) -> Self::Output { |
656 | Self::new_clamped((self.0).0 * (rhs.0).0) |
657 | } |
658 | } |
659 | |
660 | impl PartialEq<f64> for NormalizedF64 { |
661 | #[inline ] |
662 | fn eq(&self, other: &f64) -> bool { |
663 | self.get() == *other |
664 | } |
665 | } |
666 | |
667 | impl_display!(NormalizedF64); |
668 | impl_approx_64!(NormalizedF64); |
669 | |
/// Clamps `val` to the `min..=max` range.
///
/// The operation order (`max.min(val)` first, then the lower bound) is kept
/// identical to the original so non-finite inputs behave the same.
#[inline]
fn clamp_f32(min: f32, val: f32, max: f32) -> f32 {
    let capped = max.min(val);
    min.max(capped)
}
674 | |
/// Clamps `val` to the `min..=max` range.
///
/// The operation order (`max.min(val)` first, then the lower bound) is kept
/// identical to the original so non-finite inputs behave the same.
#[inline]
fn clamp_f64(min: f64, val: f64, max: f64) -> f64 {
    let capped = max.min(val);
    min.max(capped)
}
679 | |
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn finite_f32() {
        assert_eq!(FiniteF32::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(FiniteF32::new(core::f32::NAN), None);
        assert_eq!(FiniteF32::new(core::f32::INFINITY), None);
        assert_eq!(FiniteF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn positive_f32() {
        assert_eq!(NonZeroPositiveF32::new(-1.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF32::new(0.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF32::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(
            NonZeroPositiveF32::new(core::f32::EPSILON).map(|n| n.get()),
            Some(core::f32::EPSILON)
        );
        assert_eq!(
            NonZeroPositiveF32::new(-core::f32::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NonZeroPositiveF32::new(core::f32::NAN), None);
        assert_eq!(NonZeroPositiveF32::new(core::f32::INFINITY), None);
        assert_eq!(NonZeroPositiveF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn positive_f64() {
        // Was `NonZeroPositiveF32::new(-1.0)` — a copy-paste slip that left
        // the f64 negative case untested. Fixed to use the f64 type.
        assert_eq!(NonZeroPositiveF64::new(-1.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF64::new(0.0).map(|n| n.get()), None);
        assert_eq!(NonZeroPositiveF64::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(
            NonZeroPositiveF64::new(core::f64::EPSILON).map(|n| n.get()),
            Some(core::f64::EPSILON)
        );
        assert_eq!(
            NonZeroPositiveF64::new(-core::f64::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NonZeroPositiveF64::new(core::f64::NAN), None);
        assert_eq!(NonZeroPositiveF64::new(core::f64::INFINITY), None);
        assert_eq!(NonZeroPositiveF64::new(core::f64::NEG_INFINITY), None);
    }

    #[test]
    fn norm_f32() {
        assert_eq!(NormalizedF32::new(-0.5), None);
        assert_eq!(
            NormalizedF32::new(-core::f32::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NormalizedF32::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(NormalizedF32::new(0.5).map(|n| n.get()), Some(0.5));
        assert_eq!(NormalizedF32::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(NormalizedF32::new(1.5), None);
        assert_eq!(NormalizedF32::new(core::f32::NAN), None);
        assert_eq!(NormalizedF32::new(core::f32::INFINITY), None);
        assert_eq!(NormalizedF32::new(core::f32::NEG_INFINITY), None);
    }

    #[test]
    fn clamped_norm_f32() {
        assert_eq!(NormalizedF32::new_clamped(-0.5).get(), 0.0);
        assert_eq!(NormalizedF32::new_clamped(0.5).get(), 0.5);
        assert_eq!(NormalizedF32::new_clamped(1.5).get(), 1.0);
        assert_eq!(NormalizedF32::new_clamped(core::f32::NAN).get(), 0.0);
        assert_eq!(NormalizedF32::new_clamped(core::f32::INFINITY).get(), 0.0);
        assert_eq!(
            NormalizedF32::new_clamped(core::f32::NEG_INFINITY).get(),
            0.0
        );
    }

    #[test]
    fn norm_f64() {
        assert_eq!(NormalizedF64::new(-0.5), None);
        assert_eq!(
            NormalizedF64::new(-core::f64::EPSILON).map(|n| n.get()),
            None
        );
        assert_eq!(NormalizedF64::new(0.0).map(|n| n.get()), Some(0.0));
        assert_eq!(NormalizedF64::new(0.5).map(|n| n.get()), Some(0.5));
        assert_eq!(NormalizedF64::new(1.0).map(|n| n.get()), Some(1.0));
        assert_eq!(NormalizedF64::new(1.5), None);
        assert_eq!(NormalizedF64::new(core::f64::NAN), None);
        assert_eq!(NormalizedF64::new(core::f64::INFINITY), None);
        assert_eq!(NormalizedF64::new(core::f64::NEG_INFINITY), None);
    }

    #[test]
    fn clamped_norm_f64() {
        assert_eq!(NormalizedF64::new_clamped(-0.5).get(), 0.0);
        assert_eq!(NormalizedF64::new_clamped(0.5).get(), 0.5);
        assert_eq!(NormalizedF64::new_clamped(1.5).get(), 1.0);
        assert_eq!(NormalizedF64::new_clamped(core::f64::NAN).get(), 0.0);
        assert_eq!(NormalizedF64::new_clamped(core::f64::INFINITY).get(), 0.0);
        assert_eq!(
            NormalizedF64::new_clamped(core::f64::NEG_INFINITY).get(),
            0.0
        );
    }
}
786 | |