1 | //! Constants for the `f16` half-precision floating point type. |
2 | //! |
3 | //! *[See also the `f16` primitive type][f16].* |
4 | //! |
5 | //! Mathematically significant numbers are provided in the `consts` sub-module. |
6 | //! |
7 | //! For the constants defined directly in this module |
8 | //! (as distinct from those defined in the `consts` sub-module), |
9 | //! new code should instead use the associated constants |
10 | //! defined directly on the `f16` type. |
11 | |
12 | #![unstable (feature = "f16" , issue = "116909" )] |
13 | |
14 | use crate::convert::FloatToInt; |
15 | use crate::num::FpCategory; |
16 | #[cfg (not(test))] |
17 | use crate::num::libm; |
18 | use crate::panic::const_assert; |
19 | use crate::{intrinsics, mem}; |
20 | |
/// Basic mathematical constants.
///
/// Each literal below is written with more decimal digits than an `f16` can
/// hold; the compiler rounds it to the nearest representable `f16` value.
#[unstable (feature = "f16" , issue = "116909" )]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    /// Archimedes' constant (π)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const PHI: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const EGAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    ///
    /// Since τ = 2π, this is also 1/sqrt(τ) — hence the alias.
    #[doc (alias = "FRAC_1_SQRT_TAU" )]
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable (feature = "f16" , issue = "116909" )]
    // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// Euler's number (e)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}
135 | |
136 | impl f16 { |
137 | // FIXME(f16_f128): almost all methods in this `impl` are missing examples and a const |
138 | // implementation. Add these once we can run code on all platforms and have f16/f128 in CTFE. |
139 | |
    /// The radix or base of the internal representation of `f16`.
    ///
    /// `f16` is a binary floating-point format, so this is always 2.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const RADIX: u32 = 2;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const DIGITS: u32 = 3;
161 | |
    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>, i.e. 2<sup>−10</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable (feature = "f16" , issue = "116909" )]
    #[rustc_diagnostic_item = "f16_epsilon" ]
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to −[`MAX`] (−65504 in decimal).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value.
    ///
    /// Equal to
    /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>
    /// (65504 in decimal).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MAX: f16 = 6.5504e+4_f16;
197 | |
    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub> [`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const MAX_10_EXP: i32 = 4;
231 | |
    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard makes a difference between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
    // The self-division deliberately produces NaN in const evaluation; allow the
    // `eq_op` lint that would otherwise flag an expression with identical operands.
    #[allow (clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan" ]
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
    // A positive finite value divided by zero evaluates to positive infinity.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    // A negative finite value divided by zero evaluates to negative infinity.
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
256 | |
    /// Sign bit: selects bit 15 of the IEEE 754 binary16 representation.
    pub(crate) const SIGN_MASK: u16 = 0x8000;

    /// Exponent mask: selects the 5 exponent bits (bits 10–14).
    pub(crate) const EXP_MASK: u16 = 0x7c00;

    /// Mantissa mask: selects the 10 explicitly stored significand bits (bits 0–9).
    pub(crate) const MAN_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal):
    /// only the least-significant significand bit is set.
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal):
    /// the positive tiny value with the sign bit set.
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
271 | |
272 | /// Returns `true` if this value is NaN. |
273 | /// |
274 | /// ``` |
275 | /// #![feature(f16)] |
276 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
277 | /// |
278 | /// let nan = f16::NAN; |
279 | /// let f = 7.0_f16; |
280 | /// |
281 | /// assert!(nan.is_nan()); |
282 | /// assert!(!f.is_nan()); |
283 | /// # } |
284 | /// ``` |
285 | #[inline ] |
286 | #[must_use ] |
287 | #[unstable (feature = "f16" , issue = "116909" )] |
288 | #[allow (clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :) |
289 | pub const fn is_nan(self) -> bool { |
290 | self != self |
291 | } |
292 | |
293 | /// Returns `true` if this value is positive infinity or negative infinity, and |
294 | /// `false` otherwise. |
295 | /// |
296 | /// ``` |
297 | /// #![feature(f16)] |
298 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
299 | /// |
300 | /// let f = 7.0f16; |
301 | /// let inf = f16::INFINITY; |
302 | /// let neg_inf = f16::NEG_INFINITY; |
303 | /// let nan = f16::NAN; |
304 | /// |
305 | /// assert!(!f.is_infinite()); |
306 | /// assert!(!nan.is_infinite()); |
307 | /// |
308 | /// assert!(inf.is_infinite()); |
309 | /// assert!(neg_inf.is_infinite()); |
310 | /// # } |
311 | /// ``` |
312 | #[inline ] |
313 | #[must_use ] |
314 | #[unstable (feature = "f16" , issue = "116909" )] |
315 | pub const fn is_infinite(self) -> bool { |
316 | (self == f16::INFINITY) | (self == f16::NEG_INFINITY) |
317 | } |
318 | |
    /// Returns `true` if this number is neither infinite nor NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] {
    ///
    /// let f = 7.0f16;
    /// let inf: f16 = f16::INFINITY;
    /// let neg_inf: f16 = f16::NEG_INFINITY;
    /// let nan: f16 = f16::NAN;
    ///
    /// assert!(f.is_finite());
    ///
    /// assert!(!nan.is_finite());
    /// assert!(!inf.is_finite());
    /// assert!(!neg_inf.is_finite());
    /// # }
    /// ```
    #[inline ]
    #[must_use ]
    #[unstable (feature = "f16" , issue = "116909" )]
    #[rustc_const_unstable (feature = "f16" , issue = "116909" )]
    pub const fn is_finite(self) -> bool {
        // There's no need to handle NaN separately: if self is NaN,
        // the comparison is not true, exactly as desired.
        // `abs` folds negative infinity into positive infinity, so one
        // strict comparison against `INFINITY` covers both signs.
        self.abs() < Self::INFINITY
    }
346 | |
347 | /// Returns `true` if the number is [subnormal]. |
348 | /// |
349 | /// ``` |
350 | /// #![feature(f16)] |
351 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
352 | /// |
353 | /// let min = f16::MIN_POSITIVE; // 6.1035e-5 |
354 | /// let max = f16::MAX; |
355 | /// let lower_than_min = 1.0e-7_f16; |
356 | /// let zero = 0.0_f16; |
357 | /// |
358 | /// assert!(!min.is_subnormal()); |
359 | /// assert!(!max.is_subnormal()); |
360 | /// |
361 | /// assert!(!zero.is_subnormal()); |
362 | /// assert!(!f16::NAN.is_subnormal()); |
363 | /// assert!(!f16::INFINITY.is_subnormal()); |
364 | /// // Values between `0` and `min` are Subnormal. |
365 | /// assert!(lower_than_min.is_subnormal()); |
366 | /// # } |
367 | /// ``` |
368 | /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number |
369 | #[inline ] |
370 | #[must_use ] |
371 | #[unstable (feature = "f16" , issue = "116909" )] |
372 | pub const fn is_subnormal(self) -> bool { |
373 | matches!(self.classify(), FpCategory::Subnormal) |
374 | } |
375 | |
376 | /// Returns `true` if the number is neither zero, infinite, [subnormal], or NaN. |
377 | /// |
378 | /// ``` |
379 | /// #![feature(f16)] |
380 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
381 | /// |
382 | /// let min = f16::MIN_POSITIVE; // 6.1035e-5 |
383 | /// let max = f16::MAX; |
384 | /// let lower_than_min = 1.0e-7_f16; |
385 | /// let zero = 0.0_f16; |
386 | /// |
387 | /// assert!(min.is_normal()); |
388 | /// assert!(max.is_normal()); |
389 | /// |
390 | /// assert!(!zero.is_normal()); |
391 | /// assert!(!f16::NAN.is_normal()); |
392 | /// assert!(!f16::INFINITY.is_normal()); |
393 | /// // Values between `0` and `min` are Subnormal. |
394 | /// assert!(!lower_than_min.is_normal()); |
395 | /// # } |
396 | /// ``` |
397 | /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number |
398 | #[inline ] |
399 | #[must_use ] |
400 | #[unstable (feature = "f16" , issue = "116909" )] |
401 | pub const fn is_normal(self) -> bool { |
402 | matches!(self.classify(), FpCategory::Normal) |
403 | } |
404 | |
405 | /// Returns the floating point category of the number. If only one property |
406 | /// is going to be tested, it is generally faster to use the specific |
407 | /// predicate instead. |
408 | /// |
409 | /// ``` |
410 | /// #![feature(f16)] |
411 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
412 | /// |
413 | /// use std::num::FpCategory; |
414 | /// |
415 | /// let num = 12.4_f16; |
416 | /// let inf = f16::INFINITY; |
417 | /// |
418 | /// assert_eq!(num.classify(), FpCategory::Normal); |
419 | /// assert_eq!(inf.classify(), FpCategory::Infinite); |
420 | /// # } |
421 | /// ``` |
422 | #[inline ] |
423 | #[unstable (feature = "f16" , issue = "116909" )] |
424 | pub const fn classify(self) -> FpCategory { |
425 | let b = self.to_bits(); |
426 | match (b & Self::MAN_MASK, b & Self::EXP_MASK) { |
427 | (0, Self::EXP_MASK) => FpCategory::Infinite, |
428 | (_, Self::EXP_MASK) => FpCategory::Nan, |
429 | (0, 0) => FpCategory::Zero, |
430 | (_, 0) => FpCategory::Subnormal, |
431 | _ => FpCategory::Normal, |
432 | } |
433 | } |
434 | |
    /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
    /// positive sign bit and positive infinity.
    ///
    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
    /// conserved over arithmetic operations, the result of `is_sign_positive` on
    /// a NaN might produce an unexpected or non-portable result. See the [specification
    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
    /// if you need fully portable behavior (will return `false` for all NaNs).
    ///
    /// ```
    /// #![feature(f16)]
    /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
    /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] {
    ///
    /// let f = 7.0_f16;
    /// let g = -7.0_f16;
    ///
    /// assert!(f.is_sign_positive());
    /// assert!(!g.is_sign_positive());
    /// # }
    /// ```
    #[inline ]
    #[must_use ]
    #[unstable (feature = "f16" , issue = "116909" )]
    pub const fn is_sign_positive(self) -> bool {
        // The sign bit is either set or clear, so this is exactly the
        // complement of `is_sign_negative` — including for zeros and NaNs.
        !self.is_sign_negative()
    }
463 | |
464 | /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with |
465 | /// negative sign bit and negative infinity. |
466 | /// |
467 | /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of |
468 | /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are |
469 | /// conserved over arithmetic operations, the result of `is_sign_negative` on |
470 | /// a NaN might produce an unexpected or non-portable result. See the [specification |
471 | /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0` |
472 | /// if you need fully portable behavior (will return `false` for all NaNs). |
473 | /// |
474 | /// ``` |
475 | /// #![feature(f16)] |
476 | /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374 |
477 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
478 | /// |
479 | /// let f = 7.0_f16; |
480 | /// let g = -7.0_f16; |
481 | /// |
482 | /// assert!(!f.is_sign_negative()); |
483 | /// assert!(g.is_sign_negative()); |
484 | /// # } |
485 | /// ``` |
486 | #[inline ] |
487 | #[must_use ] |
488 | #[unstable (feature = "f16" , issue = "116909" )] |
489 | pub const fn is_sign_negative(self) -> bool { |
490 | // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus |
491 | // applies to zeros and NaNs as well. |
492 | // SAFETY: This is just transmuting to get the sign bit, it's fine. |
493 | (self.to_bits() & (1 << 15)) != 0 |
494 | } |
495 | |
496 | /// Returns the least number greater than `self`. |
497 | /// |
498 | /// Let `TINY` be the smallest representable positive `f16`. Then, |
499 | /// - if `self.is_nan()`, this returns `self`; |
500 | /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`]; |
501 | /// - if `self` is `-TINY`, this returns -0.0; |
502 | /// - if `self` is -0.0 or +0.0, this returns `TINY`; |
503 | /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`]; |
504 | /// - otherwise the unique least value greater than `self` is returned. |
505 | /// |
506 | /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x` |
507 | /// is finite `x == x.next_up().next_down()` also holds. |
508 | /// |
509 | /// ```rust |
510 | /// #![feature(f16)] |
511 | /// # // FIXME(f16_f128): ABI issues on MSVC |
512 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
513 | /// |
514 | /// // f16::EPSILON is the difference between 1.0 and the next number up. |
515 | /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON); |
516 | /// // But not for most numbers. |
517 | /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON); |
518 | /// assert_eq!(4356f16.next_up(), 4360.0); |
519 | /// # } |
520 | /// ``` |
521 | /// |
522 | /// This operation corresponds to IEEE-754 `nextUp`. |
523 | /// |
524 | /// [`NEG_INFINITY`]: Self::NEG_INFINITY |
525 | /// [`INFINITY`]: Self::INFINITY |
526 | /// [`MIN`]: Self::MIN |
527 | /// [`MAX`]: Self::MAX |
528 | #[inline ] |
529 | #[doc (alias = "nextUp" )] |
530 | #[unstable (feature = "f16" , issue = "116909" )] |
531 | pub const fn next_up(self) -> Self { |
532 | // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing |
533 | // denormals to zero. This is in general unsound and unsupported, but here |
534 | // we do our best to still produce the correct result on such targets. |
535 | let bits = self.to_bits(); |
536 | if self.is_nan() || bits == Self::INFINITY.to_bits() { |
537 | return self; |
538 | } |
539 | |
540 | let abs = bits & !Self::SIGN_MASK; |
541 | let next_bits = if abs == 0 { |
542 | Self::TINY_BITS |
543 | } else if bits == abs { |
544 | bits + 1 |
545 | } else { |
546 | bits - 1 |
547 | }; |
548 | Self::from_bits(next_bits) |
549 | } |
550 | |
551 | /// Returns the greatest number less than `self`. |
552 | /// |
553 | /// Let `TINY` be the smallest representable positive `f16`. Then, |
554 | /// - if `self.is_nan()`, this returns `self`; |
555 | /// - if `self` is [`INFINITY`], this returns [`MAX`]; |
556 | /// - if `self` is `TINY`, this returns 0.0; |
557 | /// - if `self` is -0.0 or +0.0, this returns `-TINY`; |
558 | /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`]; |
559 | /// - otherwise the unique greatest value less than `self` is returned. |
560 | /// |
561 | /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x` |
562 | /// is finite `x == x.next_down().next_up()` also holds. |
563 | /// |
564 | /// ```rust |
565 | /// #![feature(f16)] |
566 | /// # // FIXME(f16_f128): ABI issues on MSVC |
567 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
568 | /// |
569 | /// let x = 1.0f16; |
570 | /// // Clamp value into range [0, 1). |
571 | /// let clamped = x.clamp(0.0, 1.0f16.next_down()); |
572 | /// assert!(clamped < 1.0); |
573 | /// assert_eq!(clamped.next_up(), 1.0); |
574 | /// # } |
575 | /// ``` |
576 | /// |
577 | /// This operation corresponds to IEEE-754 `nextDown`. |
578 | /// |
579 | /// [`NEG_INFINITY`]: Self::NEG_INFINITY |
580 | /// [`INFINITY`]: Self::INFINITY |
581 | /// [`MIN`]: Self::MIN |
582 | /// [`MAX`]: Self::MAX |
583 | #[inline ] |
584 | #[doc (alias = "nextDown" )] |
585 | #[unstable (feature = "f16" , issue = "116909" )] |
586 | pub const fn next_down(self) -> Self { |
587 | // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing |
588 | // denormals to zero. This is in general unsound and unsupported, but here |
589 | // we do our best to still produce the correct result on such targets. |
590 | let bits = self.to_bits(); |
591 | if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() { |
592 | return self; |
593 | } |
594 | |
595 | let abs = bits & !Self::SIGN_MASK; |
596 | let next_bits = if abs == 0 { |
597 | Self::NEG_TINY_BITS |
598 | } else if bits == abs { |
599 | bits - 1 |
600 | } else { |
601 | bits + 1 |
602 | }; |
603 | Self::from_bits(next_bits) |
604 | } |
605 | |
606 | /// Takes the reciprocal (inverse) of a number, `1/x`. |
607 | /// |
608 | /// ``` |
609 | /// #![feature(f16)] |
610 | /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms |
611 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
612 | /// |
613 | /// let x = 2.0_f16; |
614 | /// let abs_difference = (x.recip() - (1.0 / x)).abs(); |
615 | /// |
616 | /// assert!(abs_difference <= f16::EPSILON); |
617 | /// # } |
618 | /// ``` |
619 | #[inline ] |
620 | #[unstable (feature = "f16" , issue = "116909" )] |
621 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
622 | pub const fn recip(self) -> Self { |
623 | 1.0 / self |
624 | } |
625 | |
626 | /// Converts radians to degrees. |
627 | /// |
628 | /// ``` |
629 | /// #![feature(f16)] |
630 | /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms |
631 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
632 | /// |
633 | /// let angle = std::f16::consts::PI; |
634 | /// |
635 | /// let abs_difference = (angle.to_degrees() - 180.0).abs(); |
636 | /// assert!(abs_difference <= 0.5); |
637 | /// # } |
638 | /// ``` |
639 | #[inline ] |
640 | #[unstable (feature = "f16" , issue = "116909" )] |
641 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
642 | pub const fn to_degrees(self) -> Self { |
643 | // Use a literal for better precision. |
644 | const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16; |
645 | self * PIS_IN_180 |
646 | } |
647 | |
648 | /// Converts degrees to radians. |
649 | /// |
650 | /// ``` |
651 | /// #![feature(f16)] |
652 | /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms |
653 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
654 | /// |
655 | /// let angle = 180.0f16; |
656 | /// |
657 | /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs(); |
658 | /// |
659 | /// assert!(abs_difference <= 0.01); |
660 | /// # } |
661 | /// ``` |
662 | #[inline ] |
663 | #[unstable (feature = "f16" , issue = "116909" )] |
664 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
665 | pub const fn to_radians(self) -> f16 { |
666 | // Use a literal for better precision. |
667 | const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16; |
668 | self * RADS_PER_DEG |
669 | } |
670 | |
    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If one of the arguments is NaN, then the other argument is returned.
    /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs;
    /// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
    /// This also matches the behavior of libm’s fmax. In particular, if the inputs compare equal
    /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg (target_arch = "aarch64" )] { // FIXME(f16_F128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// # }
    /// ```
    #[inline ]
    #[unstable (feature = "f16" , issue = "116909" )]
    #[rustc_const_unstable (feature = "f16" , issue = "116909" )]
    #[must_use = "this returns the result of the comparison, without modifying either input" ]
    pub const fn max(self, other: f16) -> f16 {
        // Delegates to the compiler intrinsic implementing the NaN-ignoring
        // `maxNum` semantics documented above.
        intrinsics::maxnumf16(self, other)
    }
696 | |
    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If one of the arguments is NaN, then the other argument is returned.
    /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs;
    /// this function handles all NaNs the same way and avoids minNum's problems with associativity.
    /// This also matches the behavior of libm’s fmin. In particular, if the inputs compare equal
    /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg (target_arch = "aarch64" )] { // FIXME(f16_F128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// # }
    /// ```
    #[inline ]
    #[unstable (feature = "f16" , issue = "116909" )]
    #[rustc_const_unstable (feature = "f16" , issue = "116909" )]
    #[must_use = "this returns the result of the comparison, without modifying either input" ]
    pub const fn min(self, other: f16) -> f16 {
        // Delegates to the compiler intrinsic implementing the NaN-ignoring
        // `minNum` semantics documented above.
        intrinsics::minnumf16(self, other)
    }
722 | |
    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// This returns NaN when *either* argument is NaN, as opposed to
    /// [`f16::max`] which only returns NaN when *both* arguments are NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg (target_arch = "aarch64" )] { // FIXME(f16_F128): rust-lang/rust#123885
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    ///
    /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater
    /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
    /// Note that this follows the semantics specified in IEEE 754-2019.
    ///
    /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
    /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info.
    #[inline ]
    #[unstable (feature = "f16" , issue = "116909" )]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input" ]
    pub const fn maximum(self, other: f16) -> f16 {
        // Delegates to the compiler intrinsic implementing the NaN-propagating
        // IEEE 754-2019 `maximum` semantics documented above.
        intrinsics::maximumf16(self, other)
    }
754 | |
755 | /// Returns the minimum of the two numbers, propagating NaN. |
756 | /// |
757 | /// This returns NaN when *either* argument is NaN, as opposed to |
758 | /// [`f16::min`] which only returns NaN when *both* arguments are NaN. |
759 | /// |
760 | /// ``` |
761 | /// #![feature(f16)] |
762 | /// #![feature(float_minimum_maximum)] |
763 | /// # #[cfg (target_arch = "aarch64" )] { // FIXME(f16_F128): rust-lang/rust#123885 |
764 | /// |
765 | /// let x = 1.0f16; |
766 | /// let y = 2.0f16; |
767 | /// |
768 | /// assert_eq!(x.minimum(y), x); |
769 | /// assert!(x.minimum(f16::NAN).is_nan()); |
770 | /// # } |
771 | /// ``` |
772 | /// |
773 | /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser |
774 | /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0. |
775 | /// Note that this follows the semantics specified in IEEE 754-2019. |
776 | /// |
777 | /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN |
778 | /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info. |
#[inline ]
#[unstable (feature = "f16" , issue = "116909" )]
// #[unstable(feature = "float_minimum_maximum", issue = "91079")]
#[must_use = "this returns the result of the comparison, without modifying either input" ]
pub const fn minimum(self, other: f16) -> f16 {
    // Safe intrinsic implementing the IEEE 754-2019 `minimum` operation:
    // NaN-propagating, and -0.0 compares less than +0.0 (see doc comment above).
    intrinsics::minimumf16(self, other)
}
786 | |
787 | /// Calculates the midpoint (average) between `self` and `rhs`. |
788 | /// |
789 | /// This returns NaN when *either* argument is NaN or if a combination of |
790 | /// +inf and -inf is provided as arguments. |
791 | /// |
792 | /// # Examples |
793 | /// |
794 | /// ``` |
795 | /// #![feature(f16)] |
796 | /// # #[cfg (target_arch = "aarch64" )] { // FIXME(f16_F128): rust-lang/rust#123885 |
797 | /// |
798 | /// assert_eq!(1f16.midpoint(4.0), 2.5); |
799 | /// assert_eq!((-5.5f16).midpoint(8.0), 1.25); |
800 | /// # } |
801 | /// ``` |
802 | #[inline ] |
803 | #[doc (alias = "average" )] |
804 | #[unstable (feature = "f16" , issue = "116909" )] |
805 | #[rustc_const_unstable (feature = "f16" , issue = "116909" )] |
806 | pub const fn midpoint(self, other: f16) -> f16 { |
807 | const LO: f16 = f16::MIN_POSITIVE * 2.; |
808 | const HI: f16 = f16::MAX / 2.; |
809 | |
810 | let (a, b) = (self, other); |
811 | let abs_a = a.abs(); |
812 | let abs_b = b.abs(); |
813 | |
814 | if abs_a <= HI && abs_b <= HI { |
815 | // Overflow is impossible |
816 | (a + b) / 2. |
817 | } else if abs_a < LO { |
818 | // Not safe to halve `a` (would underflow) |
819 | a + (b / 2.) |
820 | } else if abs_b < LO { |
821 | // Not safe to halve `b` (would underflow) |
822 | (a / 2.) + b |
823 | } else { |
824 | // Safe to halve `a` and `b` |
825 | (a / 2.) + (b / 2.) |
826 | } |
827 | } |
828 | |
829 | /// Rounds toward zero and converts to any primitive integer type, |
830 | /// assuming that the value is finite and fits in that type. |
831 | /// |
832 | /// ``` |
833 | /// #![feature(f16)] |
834 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
835 | /// |
836 | /// let value = 4.6_f16; |
837 | /// let rounded = unsafe { value.to_int_unchecked::<u16>() }; |
838 | /// assert_eq!(rounded, 4); |
839 | /// |
840 | /// let value = -128.9_f16; |
841 | /// let rounded = unsafe { value.to_int_unchecked::<i8>() }; |
842 | /// assert_eq!(rounded, i8::MIN); |
843 | /// # } |
844 | /// ``` |
845 | /// |
846 | /// # Safety |
847 | /// |
848 | /// The value must: |
849 | /// |
850 | /// * Not be `NaN` |
851 | /// * Not be infinite |
852 | /// * Be representable in the return type `Int`, after truncating off its fractional part |
#[inline ]
#[unstable (feature = "f16" , issue = "116909" )]
#[must_use = "this returns the result of the operation, without modifying the original" ]
pub unsafe fn to_int_unchecked<Int>(self) -> Int
where
    Self: FloatToInt<Int>,
{
    // Delegates to the `FloatToInt` trait so a single implementation covers
    // every primitive integer target type.
    // SAFETY: the caller must uphold the safety contract for
    // `FloatToInt::to_int_unchecked`.
    unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
}
864 | |
865 | /// Raw transmutation to `u16`. |
866 | /// |
867 | /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms. |
868 | /// |
/// See [`from_bits`](Self::from_bits) for some discussion of the
870 | /// portability of this operation (there are almost no issues). |
871 | /// |
872 | /// Note that this function is distinct from `as` casting, which attempts to |
873 | /// preserve the *numeric* value, and not the bitwise value. |
874 | /// |
875 | /// ``` |
876 | /// #![feature(f16)] |
877 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
878 | /// |
879 | /// # // FIXME(f16_f128): enable this once const casting works |
880 | /// # // assert_ne!((1f16).to_bits(), 1f16 as u128); // to_bits() is not casting! |
881 | /// assert_eq!((12.5f16).to_bits(), 0x4a40); |
882 | /// # } |
883 | /// ``` |
#[inline ]
#[unstable (feature = "f16" , issue = "116909" )]
#[must_use = "this returns the result of the operation, without modifying the original" ]
#[allow (unnecessary_transmutes)]
pub const fn to_bits(self) -> u16 {
    // `f16` and `u16` have the same size, so this is a plain bit-for-bit copy.
    // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
    unsafe { mem::transmute(self) }
}
892 | |
893 | /// Raw transmutation from `u16`. |
894 | /// |
895 | /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms. |
896 | /// It turns out this is incredibly portable, for two reasons: |
897 | /// |
898 | /// * Floats and Ints have the same endianness on all supported platforms. |
899 | /// * IEEE 754 very precisely specifies the bit layout of floats. |
900 | /// |
901 | /// However there is one caveat: prior to the 2008 version of IEEE 754, how |
902 | /// to interpret the NaN signaling bit wasn't actually specified. Most platforms |
903 | /// (notably x86 and ARM) picked the interpretation that was ultimately |
904 | /// standardized in 2008, but some didn't (notably MIPS). As a result, all |
905 | /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa. |
906 | /// |
907 | /// Rather than trying to preserve signaling-ness cross-platform, this |
908 | /// implementation favors preserving the exact bits. This means that |
909 | /// any payloads encoded in NaNs will be preserved even if the result of |
910 | /// this method is sent over the network from an x86 machine to a MIPS one. |
911 | /// |
912 | /// If the results of this method are only manipulated by the same |
913 | /// architecture that produced them, then there is no portability concern. |
914 | /// |
915 | /// If the input isn't NaN, then there is no portability concern. |
916 | /// |
917 | /// If you don't care about signalingness (very likely), then there is no |
918 | /// portability concern. |
919 | /// |
920 | /// Note that this function is distinct from `as` casting, which attempts to |
921 | /// preserve the *numeric* value, and not the bitwise value. |
922 | /// |
923 | /// ``` |
924 | /// #![feature(f16)] |
925 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
926 | /// |
927 | /// let v = f16::from_bits(0x4a40); |
928 | /// assert_eq!(v, 12.5); |
929 | /// # } |
930 | /// ``` |
#[inline ]
#[must_use ]
#[unstable (feature = "f16" , issue = "116909" )]
#[allow (unnecessary_transmutes)]
pub const fn from_bits(v: u16) -> Self {
    // Bit-for-bit reinterpretation; every `u16` value is a valid `f16`.
    // It turns out the safety issues with sNaN were overblown! Hooray!
    // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
    unsafe { mem::transmute(v) }
}
940 | |
941 | /// Returns the memory representation of this floating point number as a byte array in |
942 | /// big-endian (network) byte order. |
943 | /// |
944 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
945 | /// portability of this operation (there are almost no issues). |
946 | /// |
947 | /// # Examples |
948 | /// |
949 | /// ``` |
950 | /// #![feature(f16)] |
951 | /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374 |
952 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
953 | /// |
954 | /// let bytes = 12.5f16.to_be_bytes(); |
955 | /// assert_eq!(bytes, [0x4a, 0x40]); |
956 | /// # } |
957 | /// ``` |
958 | #[inline ] |
959 | #[unstable (feature = "f16" , issue = "116909" )] |
960 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
961 | pub const fn to_be_bytes(self) -> [u8; 2] { |
962 | self.to_bits().to_be_bytes() |
963 | } |
964 | |
965 | /// Returns the memory representation of this floating point number as a byte array in |
966 | /// little-endian byte order. |
967 | /// |
968 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
969 | /// portability of this operation (there are almost no issues). |
970 | /// |
971 | /// # Examples |
972 | /// |
973 | /// ``` |
974 | /// #![feature(f16)] |
975 | /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374 |
976 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
977 | /// |
978 | /// let bytes = 12.5f16.to_le_bytes(); |
979 | /// assert_eq!(bytes, [0x40, 0x4a]); |
980 | /// # } |
981 | /// ``` |
982 | #[inline ] |
983 | #[unstable (feature = "f16" , issue = "116909" )] |
984 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
985 | pub const fn to_le_bytes(self) -> [u8; 2] { |
986 | self.to_bits().to_le_bytes() |
987 | } |
988 | |
989 | /// Returns the memory representation of this floating point number as a byte array in |
990 | /// native byte order. |
991 | /// |
992 | /// As the target platform's native endianness is used, portable code |
993 | /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead. |
994 | /// |
995 | /// [`to_be_bytes`]: f16::to_be_bytes |
996 | /// [`to_le_bytes`]: f16::to_le_bytes |
997 | /// |
998 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
999 | /// portability of this operation (there are almost no issues). |
1000 | /// |
1001 | /// # Examples |
1002 | /// |
1003 | /// ``` |
1004 | /// #![feature(f16)] |
1005 | /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374 |
1006 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1007 | /// |
1008 | /// let bytes = 12.5f16.to_ne_bytes(); |
1009 | /// assert_eq!( |
1010 | /// bytes, |
1011 | /// if cfg!(target_endian = "big" ) { |
1012 | /// [0x4a, 0x40] |
1013 | /// } else { |
1014 | /// [0x40, 0x4a] |
1015 | /// } |
1016 | /// ); |
1017 | /// # } |
1018 | /// ``` |
1019 | #[inline ] |
1020 | #[unstable (feature = "f16" , issue = "116909" )] |
1021 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
1022 | pub const fn to_ne_bytes(self) -> [u8; 2] { |
1023 | self.to_bits().to_ne_bytes() |
1024 | } |
1025 | |
1026 | /// Creates a floating point value from its representation as a byte array in big endian. |
1027 | /// |
1028 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1029 | /// portability of this operation (there are almost no issues). |
1030 | /// |
1031 | /// # Examples |
1032 | /// |
1033 | /// ``` |
1034 | /// #![feature(f16)] |
1035 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1036 | /// |
1037 | /// let value = f16::from_be_bytes([0x4a, 0x40]); |
1038 | /// assert_eq!(value, 12.5); |
1039 | /// # } |
1040 | /// ``` |
1041 | #[inline ] |
1042 | #[must_use ] |
1043 | #[unstable (feature = "f16" , issue = "116909" )] |
1044 | pub const fn from_be_bytes(bytes: [u8; 2]) -> Self { |
1045 | Self::from_bits(u16::from_be_bytes(bytes)) |
1046 | } |
1047 | |
1048 | /// Creates a floating point value from its representation as a byte array in little endian. |
1049 | /// |
1050 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1051 | /// portability of this operation (there are almost no issues). |
1052 | /// |
1053 | /// # Examples |
1054 | /// |
1055 | /// ``` |
1056 | /// #![feature(f16)] |
1057 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1058 | /// |
1059 | /// let value = f16::from_le_bytes([0x40, 0x4a]); |
1060 | /// assert_eq!(value, 12.5); |
1061 | /// # } |
1062 | /// ``` |
1063 | #[inline ] |
1064 | #[must_use ] |
1065 | #[unstable (feature = "f16" , issue = "116909" )] |
1066 | pub const fn from_le_bytes(bytes: [u8; 2]) -> Self { |
1067 | Self::from_bits(u16::from_le_bytes(bytes)) |
1068 | } |
1069 | |
1070 | /// Creates a floating point value from its representation as a byte array in native endian. |
1071 | /// |
1072 | /// As the target platform's native endianness is used, portable code |
1073 | /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as |
1074 | /// appropriate instead. |
1075 | /// |
1076 | /// [`from_be_bytes`]: f16::from_be_bytes |
1077 | /// [`from_le_bytes`]: f16::from_le_bytes |
1078 | /// |
1079 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1080 | /// portability of this operation (there are almost no issues). |
1081 | /// |
1082 | /// # Examples |
1083 | /// |
1084 | /// ``` |
1085 | /// #![feature(f16)] |
1086 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1087 | /// |
1088 | /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big" ) { |
1089 | /// [0x4a, 0x40] |
1090 | /// } else { |
1091 | /// [0x40, 0x4a] |
1092 | /// }); |
1093 | /// assert_eq!(value, 12.5); |
1094 | /// # } |
1095 | /// ``` |
1096 | #[inline ] |
1097 | #[must_use ] |
1098 | #[unstable (feature = "f16" , issue = "116909" )] |
1099 | pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self { |
1100 | Self::from_bits(u16::from_ne_bytes(bytes)) |
1101 | } |
1102 | |
1103 | /// Returns the ordering between `self` and `other`. |
1104 | /// |
1105 | /// Unlike the standard partial comparison between floating point numbers, |
1106 | /// this comparison always produces an ordering in accordance to |
1107 | /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision) |
1108 | /// floating point standard. The values are ordered in the following sequence: |
1109 | /// |
1110 | /// - negative quiet NaN |
1111 | /// - negative signaling NaN |
1112 | /// - negative infinity |
1113 | /// - negative numbers |
1114 | /// - negative subnormal numbers |
1115 | /// - negative zero |
1116 | /// - positive zero |
1117 | /// - positive subnormal numbers |
1118 | /// - positive numbers |
1119 | /// - positive infinity |
1120 | /// - positive signaling NaN |
1121 | /// - positive quiet NaN. |
1122 | /// |
1123 | /// The ordering established by this function does not always agree with the |
1124 | /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example, |
1125 | /// they consider negative and positive zero equal, while `total_cmp` |
1126 | /// doesn't. |
1127 | /// |
1128 | /// The interpretation of the signaling NaN bit follows the definition in |
1129 | /// the IEEE 754 standard, which may not match the interpretation by some of |
1130 | /// the older, non-conformant (e.g. MIPS) hardware implementations. |
1131 | /// |
1132 | /// # Example |
1133 | /// |
1134 | /// ``` |
1135 | /// #![feature(f16)] |
1136 | /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms |
1137 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1138 | /// |
1139 | /// struct GoodBoy { |
1140 | /// name: &'static str, |
1141 | /// weight: f16, |
1142 | /// } |
1143 | /// |
1144 | /// let mut bois = vec![ |
1145 | /// GoodBoy { name: "Pucci" , weight: 0.1 }, |
1146 | /// GoodBoy { name: "Woofer" , weight: 99.0 }, |
1147 | /// GoodBoy { name: "Yapper" , weight: 10.0 }, |
1148 | /// GoodBoy { name: "Chonk" , weight: f16::INFINITY }, |
1149 | /// GoodBoy { name: "Abs. Unit" , weight: f16::NAN }, |
1150 | /// GoodBoy { name: "Floaty" , weight: -5.0 }, |
1151 | /// ]; |
1152 | /// |
1153 | /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight)); |
1154 | /// |
1155 | /// // `f16::NAN` could be positive or negative, which will affect the sort order. |
1156 | /// if f16::NAN.is_sign_negative() { |
1157 | /// bois.into_iter().map(|b| b.weight) |
1158 | /// .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter()) |
1159 | /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits())) |
1160 | /// } else { |
1161 | /// bois.into_iter().map(|b| b.weight) |
1162 | /// .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter()) |
1163 | /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits())) |
1164 | /// } |
1165 | /// # } |
1166 | /// ``` |
1167 | #[inline ] |
1168 | #[must_use ] |
1169 | #[unstable (feature = "f16" , issue = "116909" )] |
1170 | pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering { |
1171 | let mut left = self.to_bits() as i16; |
1172 | let mut right = other.to_bits() as i16; |
1173 | |
1174 | // In case of negatives, flip all the bits except the sign |
1175 | // to achieve a similar layout as two's complement integers |
1176 | // |
1177 | // Why does this work? IEEE 754 floats consist of three fields: |
1178 | // Sign bit, exponent and mantissa. The set of exponent and mantissa |
1179 | // fields as a whole have the property that their bitwise order is |
1180 | // equal to the numeric magnitude where the magnitude is defined. |
1181 | // The magnitude is not normally defined on NaN values, but |
1182 | // IEEE 754 totalOrder defines the NaN values also to follow the |
1183 | // bitwise order. This leads to order explained in the doc comment. |
1184 | // However, the representation of magnitude is the same for negative |
1185 | // and positive numbers – only the sign bit is different. |
1186 | // To easily compare the floats as signed integers, we need to |
1187 | // flip the exponent and mantissa bits in case of negative numbers. |
1188 | // We effectively convert the numbers to "two's complement" form. |
1189 | // |
1190 | // To do the flipping, we construct a mask and XOR against it. |
1191 | // We branchlessly calculate an "all-ones except for the sign bit" |
1192 | // mask from negative-signed values: right shifting sign-extends |
1193 | // the integer, so we "fill" the mask with sign bits, and then |
1194 | // convert to unsigned to push one more zero bit. |
1195 | // On positive values, the mask is all zeros, so it's a no-op. |
1196 | left ^= (((left >> 15) as u16) >> 1) as i16; |
1197 | right ^= (((right >> 15) as u16) >> 1) as i16; |
1198 | |
1199 | left.cmp(&right) |
1200 | } |
1201 | |
1202 | /// Restrict a value to a certain interval unless it is NaN. |
1203 | /// |
1204 | /// Returns `max` if `self` is greater than `max`, and `min` if `self` is |
1205 | /// less than `min`. Otherwise this returns `self`. |
1206 | /// |
1207 | /// Note that this function returns NaN if the initial value was NaN as |
1208 | /// well. |
1209 | /// |
1210 | /// # Panics |
1211 | /// |
1212 | /// Panics if `min > max`, `min` is NaN, or `max` is NaN. |
1213 | /// |
1214 | /// # Examples |
1215 | /// |
1216 | /// ``` |
1217 | /// #![feature(f16)] |
1218 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1219 | /// |
1220 | /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0); |
1221 | /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0); |
1222 | /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0); |
1223 | /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan()); |
1224 | /// # } |
1225 | /// ``` |
1226 | #[inline ] |
1227 | #[unstable (feature = "f16" , issue = "116909" )] |
1228 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1229 | pub const fn clamp(mut self, min: f16, max: f16) -> f16 { |
1230 | const_assert!( |
1231 | min <= max, |
1232 | "min > max, or either was NaN" , |
1233 | "min > max, or either was NaN. min = {min:?}, max = {max:?}" , |
1234 | min: f16, |
1235 | max: f16, |
1236 | ); |
1237 | |
1238 | if self < min { |
1239 | self = min; |
1240 | } |
1241 | if self > max { |
1242 | self = max; |
1243 | } |
1244 | self |
1245 | } |
1246 | |
1247 | /// Computes the absolute value of `self`. |
1248 | /// |
1249 | /// This function always returns the precise result. |
1250 | /// |
1251 | /// # Examples |
1252 | /// |
1253 | /// ``` |
1254 | /// #![feature(f16)] |
1255 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1256 | /// |
1257 | /// let x = 3.5_f16; |
1258 | /// let y = -3.5_f16; |
1259 | /// |
1260 | /// assert_eq!(x.abs(), x); |
1261 | /// assert_eq!(y.abs(), -y); |
1262 | /// |
1263 | /// assert!(f16::NAN.abs().is_nan()); |
1264 | /// # } |
1265 | /// ``` |
1266 | #[inline ] |
1267 | #[unstable (feature = "f16" , issue = "116909" )] |
1268 | #[rustc_const_unstable (feature = "f16" , issue = "116909" )] |
1269 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1270 | pub const fn abs(self) -> Self { |
1271 | // FIXME(f16_f128): replace with `intrinsics::fabsf16` when available |
1272 | Self::from_bits(self.to_bits() & !(1 << 15)) |
1273 | } |
1274 | |
1275 | /// Returns a number that represents the sign of `self`. |
1276 | /// |
1277 | /// - `1.0` if the number is positive, `+0.0` or `INFINITY` |
1278 | /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY` |
1279 | /// - NaN if the number is NaN |
1280 | /// |
1281 | /// # Examples |
1282 | /// |
1283 | /// ``` |
1284 | /// #![feature(f16)] |
1285 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1286 | /// |
1287 | /// let f = 3.5_f16; |
1288 | /// |
1289 | /// assert_eq!(f.signum(), 1.0); |
1290 | /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0); |
1291 | /// |
1292 | /// assert!(f16::NAN.signum().is_nan()); |
1293 | /// # } |
1294 | /// ``` |
1295 | #[inline ] |
1296 | #[unstable (feature = "f16" , issue = "116909" )] |
1297 | #[rustc_const_unstable (feature = "f16" , issue = "116909" )] |
1298 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1299 | pub const fn signum(self) -> f16 { |
1300 | if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) } |
1301 | } |
1302 | |
1303 | /// Returns a number composed of the magnitude of `self` and the sign of |
1304 | /// `sign`. |
1305 | /// |
1306 | /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`. |
1307 | /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is |
1308 | /// returned. |
1309 | /// |
1310 | /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note |
1311 | /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust |
1312 | /// doesn't guarantee that the bit pattern of NaNs are conserved over arithmetic operations, the |
1313 | /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable |
1314 | /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more |
1315 | /// info. |
1316 | /// |
1317 | /// # Examples |
1318 | /// |
1319 | /// ``` |
1320 | /// #![feature(f16)] |
1321 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1322 | /// |
1323 | /// let f = 3.5_f16; |
1324 | /// |
1325 | /// assert_eq!(f.copysign(0.42), 3.5_f16); |
1326 | /// assert_eq!(f.copysign(-0.42), -3.5_f16); |
1327 | /// assert_eq!((-f).copysign(0.42), 3.5_f16); |
1328 | /// assert_eq!((-f).copysign(-0.42), -3.5_f16); |
1329 | /// |
1330 | /// assert!(f16::NAN.copysign(1.0).is_nan()); |
1331 | /// # } |
1332 | /// ``` |
#[inline ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn copysign(self, sign: f16) -> f16 {
    // Combines the magnitude of `self` with the sign bit of `sign`.
    // SAFETY: this is actually a safe intrinsic
    unsafe { intrinsics::copysignf16(self, sign) }
}
1341 | |
/// Float addition that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value" ]
#[unstable (feature = "float_algebraic" , issue = "136469" )]
#[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )]
#[inline ]
pub const fn algebraic_add(self, rhs: f16) -> f16 {
    // Safe intrinsic; the optimizer may apply algebraic identities (such as
    // reassociation), so results are not guaranteed identical to `self + rhs`.
    intrinsics::fadd_algebraic(self, rhs)
}
1352 | |
/// Float subtraction that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value" ]
#[unstable (feature = "float_algebraic" , issue = "136469" )]
#[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )]
#[inline ]
pub const fn algebraic_sub(self, rhs: f16) -> f16 {
    // Safe intrinsic; the optimizer may apply algebraic identities (such as
    // reassociation), so results are not guaranteed identical to `self - rhs`.
    intrinsics::fsub_algebraic(self, rhs)
}
1363 | |
/// Float multiplication that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value" ]
#[unstable (feature = "float_algebraic" , issue = "136469" )]
#[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )]
#[inline ]
pub const fn algebraic_mul(self, rhs: f16) -> f16 {
    // Safe intrinsic; the optimizer may apply algebraic identities (such as
    // reassociation), so results are not guaranteed identical to `self * rhs`.
    intrinsics::fmul_algebraic(self, rhs)
}
1374 | |
/// Float division that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value" ]
#[unstable (feature = "float_algebraic" , issue = "136469" )]
#[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )]
#[inline ]
pub const fn algebraic_div(self, rhs: f16) -> f16 {
    // Safe intrinsic; the optimizer may apply algebraic identities (such as
    // reciprocal rewriting), so results are not guaranteed identical to `self / rhs`.
    intrinsics::fdiv_algebraic(self, rhs)
}
1385 | |
/// Float remainder that allows optimizations based on algebraic rules.
///
/// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
#[must_use = "method returns a new number and does not mutate the original value" ]
#[unstable (feature = "float_algebraic" , issue = "136469" )]
#[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )]
#[inline ]
pub const fn algebraic_rem(self, rhs: f16) -> f16 {
    // Safe intrinsic; the optimizer may apply algebraic identities, so
    // results are not guaranteed identical to `self % rhs`.
    intrinsics::frem_algebraic(self, rhs)
}
1396 | } |
1397 | |
1398 | // Functions in this module fall into `core_float_math` |
1399 | // #[unstable(feature = "core_float_math", issue = "137578")] |
1400 | #[cfg (not(test))] |
1401 | #[doc (test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))] |
1402 | impl f16 { |
1403 | /// Returns the largest integer less than or equal to `self`. |
1404 | /// |
1405 | /// This function always returns the precise result. |
1406 | /// |
1407 | /// # Examples |
1408 | /// |
1409 | /// ``` |
1410 | /// #![feature(f16)] |
1411 | /// # #[cfg (not(miri))] |
1412 | /// # #[cfg (target_has_reliable_f16_math)] { |
1413 | /// |
1414 | /// let f = 3.7_f16; |
1415 | /// let g = 3.0_f16; |
1416 | /// let h = -3.7_f16; |
1417 | /// |
1418 | /// assert_eq!(f.floor(), 3.0); |
1419 | /// assert_eq!(g.floor(), 3.0); |
1420 | /// assert_eq!(h.floor(), -4.0); |
1421 | /// # } |
1422 | /// ``` |
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn floor(self) -> f16 {
    // Rounds toward negative infinity via the compiler intrinsic.
    // SAFETY: intrinsic with no preconditions
    unsafe { intrinsics::floorf16(self) }
}
1433 | |
1434 | /// Returns the smallest integer greater than or equal to `self`. |
1435 | /// |
1436 | /// This function always returns the precise result. |
1437 | /// |
1438 | /// # Examples |
1439 | /// |
1440 | /// ``` |
1441 | /// #![feature(f16)] |
1442 | /// # #[cfg (not(miri))] |
1443 | /// # #[cfg (target_has_reliable_f16_math)] { |
1444 | /// |
1445 | /// let f = 3.01_f16; |
1446 | /// let g = 4.0_f16; |
1447 | /// |
1448 | /// assert_eq!(f.ceil(), 4.0); |
1449 | /// assert_eq!(g.ceil(), 4.0); |
1450 | /// # } |
1451 | /// ``` |
#[inline ]
#[doc (alias = "ceiling" )]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn ceil(self) -> f16 {
    // Rounds toward positive infinity via the compiler intrinsic.
    // SAFETY: intrinsic with no preconditions
    unsafe { intrinsics::ceilf16(self) }
}
1463 | |
1464 | /// Returns the nearest integer to `self`. If a value is half-way between two |
1465 | /// integers, round away from `0.0`. |
1466 | /// |
1467 | /// This function always returns the precise result. |
1468 | /// |
1469 | /// # Examples |
1470 | /// |
1471 | /// ``` |
1472 | /// #![feature(f16)] |
1473 | /// # #[cfg (not(miri))] |
1474 | /// # #[cfg (target_has_reliable_f16_math)] { |
1475 | /// |
1476 | /// let f = 3.3_f16; |
1477 | /// let g = -3.3_f16; |
1478 | /// let h = -3.7_f16; |
1479 | /// let i = 3.5_f16; |
1480 | /// let j = 4.5_f16; |
1481 | /// |
1482 | /// assert_eq!(f.round(), 3.0); |
1483 | /// assert_eq!(g.round(), -3.0); |
1484 | /// assert_eq!(h.round(), -4.0); |
1485 | /// assert_eq!(i.round(), 4.0); |
1486 | /// assert_eq!(j.round(), 5.0); |
1487 | /// # } |
1488 | /// ``` |
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn round(self) -> f16 {
    // Rounds to nearest, with ties away from zero, via the compiler intrinsic.
    // SAFETY: intrinsic with no preconditions
    unsafe { intrinsics::roundf16(self) }
}
1499 | |
1500 | /// Returns the nearest integer to a number. Rounds half-way cases to the number |
1501 | /// with an even least significant digit. |
1502 | /// |
1503 | /// This function always returns the precise result. |
1504 | /// |
1505 | /// # Examples |
1506 | /// |
1507 | /// ``` |
1508 | /// #![feature(f16)] |
1509 | /// # #[cfg (not(miri))] |
1510 | /// # #[cfg (target_has_reliable_f16_math)] { |
1511 | /// |
1512 | /// let f = 3.3_f16; |
1513 | /// let g = -3.3_f16; |
1514 | /// let h = 3.5_f16; |
1515 | /// let i = 4.5_f16; |
1516 | /// |
1517 | /// assert_eq!(f.round_ties_even(), 3.0); |
1518 | /// assert_eq!(g.round_ties_even(), -3.0); |
1519 | /// assert_eq!(h.round_ties_even(), 4.0); |
1520 | /// assert_eq!(i.round_ties_even(), 4.0); |
1521 | /// # } |
1522 | /// ``` |
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn round_ties_even(self) -> f16 {
    // Safe intrinsic (unlike the other rounding intrinsics here, this one
    // needs no `unsafe` block); rounds to nearest with ties to even.
    intrinsics::round_ties_even_f16(self)
}
1532 | |
/// Returns the integer part of `self`.
/// This means that non-integer numbers are always truncated towards zero.
///
/// This function always returns the precise result.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let f = 3.7_f16;
/// let g = 3.0_f16;
/// let h = -3.7_f16;
///
/// assert_eq!(f.trunc(), 3.0);
/// assert_eq!(g.trunc(), 3.0);
/// assert_eq!(h.trunc(), -3.0);
/// # }
/// ```
#[inline ]
#[doc (alias = "truncate" )]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn trunc(self) -> f16 {
// SAFETY: intrinsic with no preconditions
unsafe { intrinsics::truncf16(self) }
}
1565 | |
/// Returns the fractional part of `self`.
///
/// This function always returns the precise result.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let x = 3.6_f16;
/// let y = -3.6_f16;
/// let abs_difference_x = (x.fract() - 0.6).abs();
/// let abs_difference_y = (y.fract() - (-0.6)).abs();
///
/// assert!(abs_difference_x <= f16::EPSILON);
/// assert!(abs_difference_y <= f16::EPSILON);
/// # }
/// ```
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[rustc_const_unstable (feature = "f16" , issue = "116909" )]
// #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub const fn fract(self) -> f16 {
// Defined as `self - self.trunc()`. The subtraction is exact because
// `self` and `self.trunc()` share an exponent, so this always yields the
// precise fractional part; note that for infinite inputs this evaluates
// `inf - inf`, producing NaN.
self - self.trunc()
}
1595 | |
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error, yielding a more accurate result than an unfused multiply-add.
///
/// Using `mul_add` *may* be more performant than an unfused multiply-add if
/// the target architecture has a dedicated `fma` CPU instruction. However,
/// this is not always true, and will be heavily dependent on designing
/// algorithms with specific target hardware in mind.
///
/// # Precision
///
/// The result of this operation is guaranteed to be the rounded
/// infinite-precision result. It is specified by IEEE 754 as
/// `fusedMultiplyAdd` and guaranteed not to change.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let m = 10.0_f16;
/// let x = 4.0_f16;
/// let b = 60.0_f16;
///
/// assert_eq!(m.mul_add(x, b), 100.0);
/// assert_eq!(m * x + b, 100.0);
///
/// let one_plus_eps = 1.0_f16 + f16::EPSILON;
/// let one_minus_eps = 1.0_f16 - f16::EPSILON;
/// let minus_one = -1.0_f16;
///
/// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
/// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
/// // Different rounding with the non-fused multiply and add.
/// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
/// # }
/// ```
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[doc (alias = "fmaf16" , alias = "fusedMultiplyAdd" )]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub fn mul_add(self, a: f16, b: f16) -> f16 {
// The intrinsic computes the fused operation with a single rounding step,
// which is what gives the precision guarantee documented above.
// SAFETY: intrinsic with no preconditions
unsafe { intrinsics::fmaf16(self, a, b) }
}
1643 | |
1644 | /// Calculates Euclidean division, the matching method for `rem_euclid`. |
1645 | /// |
1646 | /// This computes the integer `n` such that |
1647 | /// `self = n * rhs + self.rem_euclid(rhs)`. |
1648 | /// In other words, the result is `self / rhs` rounded to the integer `n` |
1649 | /// such that `self >= n * rhs`. |
1650 | /// |
1651 | /// # Precision |
1652 | /// |
1653 | /// The result of this operation is guaranteed to be the rounded |
1654 | /// infinite-precision result. |
1655 | /// |
1656 | /// # Examples |
1657 | /// |
1658 | /// ``` |
1659 | /// #![feature(f16)] |
1660 | /// # #[cfg (not(miri))] |
1661 | /// # #[cfg (target_has_reliable_f16_math)] { |
1662 | /// |
1663 | /// let a: f16 = 7.0; |
1664 | /// let b = 4.0; |
1665 | /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0 |
1666 | /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0 |
1667 | /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0 |
1668 | /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0 |
1669 | /// # } |
1670 | /// ``` |
1671 | #[inline ] |
1672 | #[rustc_allow_incoherent_impl ] |
1673 | #[unstable (feature = "f16" , issue = "116909" )] |
1674 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1675 | pub fn div_euclid(self, rhs: f16) -> f16 { |
1676 | let q = (self / rhs).trunc(); |
1677 | if self % rhs < 0.0 { |
1678 | return if rhs > 0.0 { q - 1.0 } else { q + 1.0 }; |
1679 | } |
1680 | q |
1681 | } |
1682 | |
1683 | /// Calculates the least nonnegative remainder of `self (mod rhs)`. |
1684 | /// |
1685 | /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in |
1686 | /// most cases. However, due to a floating point round-off error it can |
1687 | /// result in `r == rhs.abs()`, violating the mathematical definition, if |
1688 | /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`. |
1689 | /// This result is not an element of the function's codomain, but it is the |
1690 | /// closest floating point number in the real numbers and thus fulfills the |
1691 | /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)` |
1692 | /// approximately. |
1693 | /// |
1694 | /// # Precision |
1695 | /// |
1696 | /// The result of this operation is guaranteed to be the rounded |
1697 | /// infinite-precision result. |
1698 | /// |
1699 | /// # Examples |
1700 | /// |
1701 | /// ``` |
1702 | /// #![feature(f16)] |
1703 | /// # #[cfg (not(miri))] |
1704 | /// # #[cfg (target_has_reliable_f16_math)] { |
1705 | /// |
1706 | /// let a: f16 = 7.0; |
1707 | /// let b = 4.0; |
1708 | /// assert_eq!(a.rem_euclid(b), 3.0); |
1709 | /// assert_eq!((-a).rem_euclid(b), 1.0); |
1710 | /// assert_eq!(a.rem_euclid(-b), 3.0); |
1711 | /// assert_eq!((-a).rem_euclid(-b), 1.0); |
1712 | /// // limitation due to round-off error |
1713 | /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0); |
1714 | /// # } |
1715 | /// ``` |
1716 | #[inline ] |
1717 | #[rustc_allow_incoherent_impl ] |
1718 | #[doc (alias = "modulo" , alias = "mod" )] |
1719 | #[unstable (feature = "f16" , issue = "116909" )] |
1720 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1721 | pub fn rem_euclid(self, rhs: f16) -> f16 { |
1722 | let r = self % rhs; |
1723 | if r < 0.0 { r + rhs.abs() } else { r } |
1724 | } |
1725 | |
/// Raises a number to an integer power.
///
/// Using this function is generally faster than using `powf`.
/// It might have a different sequence of rounding operations than `powf`,
/// so the results are not guaranteed to agree.
///
/// # Unspecified precision
///
/// The precision of this function is non-deterministic. This means it varies by platform,
/// Rust version, and can even differ within the same execution from one invocation to the next.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let x = 2.0_f16;
/// let abs_difference = (x.powi(2) - (x * x)).abs();
/// assert!(abs_difference <= f16::EPSILON);
///
/// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
/// # }
/// ```
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub fn powi(self, n: i32) -> f16 {
// Delegates to the compiler intrinsic; precision is unspecified (see above).
// SAFETY: intrinsic with no preconditions
unsafe { intrinsics::powif16(self, n) }
}
1759 | |
/// Returns the square root of a number.
///
/// Returns NaN if `self` is a negative number other than `-0.0`.
///
/// # Precision
///
/// The result of this operation is guaranteed to be the rounded
/// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
/// and guaranteed not to change.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let positive = 4.0_f16;
/// let negative = -4.0_f16;
/// let negative_zero = -0.0_f16;
///
/// assert_eq!(positive.sqrt(), 2.0);
/// assert!(negative.sqrt().is_nan());
/// assert!(negative_zero.sqrt() == negative_zero);
/// # }
/// ```
#[inline ]
#[doc (alias = "squareRoot" )]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub fn sqrt(self) -> f16 {
// Delegates to the compiler intrinsic, which provides the correctly
// rounded IEEE 754 `squareRoot` documented above.
// SAFETY: intrinsic with no preconditions
unsafe { intrinsics::sqrtf16(self) }
}
1795 | |
/// Returns the cube root of a number.
///
/// # Unspecified precision
///
/// The precision of this function is non-deterministic. This means it varies by platform,
/// Rust version, and can even differ within the same execution from one invocation to the next.
///
/// This function currently corresponds to the `cbrtf` from libc on Unix
/// and Windows. Note that this might change in the future.
///
/// # Examples
///
/// ```
/// #![feature(f16)]
/// # #[cfg (not(miri))]
/// # #[cfg (target_has_reliable_f16_math)] {
///
/// let x = 8.0f16;
///
/// // x^(1/3) - 2 == 0
/// let abs_difference = (x.cbrt() - 2.0).abs();
///
/// assert!(abs_difference <= f16::EPSILON);
/// # }
/// ```
#[inline ]
#[rustc_allow_incoherent_impl ]
#[unstable (feature = "f16" , issue = "116909" )]
#[must_use = "method returns a new number and does not mutate the original value" ]
pub fn cbrt(self) -> f16 {
// No f16 cbrt routine exists, so widen to f32, use the f32 libm
// implementation, and narrow the result back. The extra narrowing step
// may double-round, which is acceptable under the unspecified-precision
// contract documented above.
libm::cbrtf(self as f32) as f16
}
1828 | } |
1829 | |