1 | //! Constants for the `f128` quadruple-precision floating point type. |
2 | //! |
3 | //! *[See also the `f128` primitive type][f128].* |
4 | //! |
5 | //! Mathematically significant numbers are provided in the `consts` sub-module. |
6 | //! |
7 | //! For the constants defined directly in this module |
8 | //! (as distinct from those defined in the `consts` sub-module), |
9 | //! new code should instead use the associated constants |
10 | //! defined directly on the `f128` type. |
11 | |
#![unstable(feature = "f128", issue = "116909")]
13 | |
14 | use crate::convert::FloatToInt; |
15 | use crate::num::FpCategory; |
16 | use crate::panic::const_assert; |
17 | use crate::{intrinsics, mem}; |
18 | |
19 | /// Basic mathematical constants. |
20 | #[unstable (feature = "f128" , issue = "116909" )] |
21 | pub mod consts { |
22 | // FIXME: replace with mathematical constants from cmath. |
23 | |
24 | /// Archimedes' constant (π) |
25 | #[unstable (feature = "f128" , issue = "116909" )] |
26 | pub const PI: f128 = 3.14159265358979323846264338327950288419716939937510582097494_f128; |
27 | |
28 | /// The full circle constant (τ) |
29 | /// |
30 | /// Equal to 2π. |
31 | #[unstable (feature = "f128" , issue = "116909" )] |
32 | pub const TAU: f128 = 6.28318530717958647692528676655900576839433879875021164194989_f128; |
33 | |
34 | /// The golden ratio (φ) |
35 | #[unstable (feature = "f128" , issue = "116909" )] |
36 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
37 | pub const PHI: f128 = 1.61803398874989484820458683436563811772030917980576286213545_f128; |
38 | |
39 | /// The Euler-Mascheroni constant (γ) |
40 | #[unstable (feature = "f128" , issue = "116909" )] |
41 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
42 | pub const EGAMMA: f128 = 0.577215664901532860606512090082402431042159335939923598805767_f128; |
43 | |
44 | /// π/2 |
45 | #[unstable (feature = "f128" , issue = "116909" )] |
46 | pub const FRAC_PI_2: f128 = 1.57079632679489661923132169163975144209858469968755291048747_f128; |
47 | |
48 | /// π/3 |
49 | #[unstable (feature = "f128" , issue = "116909" )] |
50 | pub const FRAC_PI_3: f128 = 1.04719755119659774615421446109316762806572313312503527365831_f128; |
51 | |
52 | /// π/4 |
53 | #[unstable (feature = "f128" , issue = "116909" )] |
54 | pub const FRAC_PI_4: f128 = 0.785398163397448309615660845819875721049292349843776455243736_f128; |
55 | |
56 | /// π/6 |
57 | #[unstable (feature = "f128" , issue = "116909" )] |
58 | pub const FRAC_PI_6: f128 = 0.523598775598298873077107230546583814032861566562517636829157_f128; |
59 | |
60 | /// π/8 |
61 | #[unstable (feature = "f128" , issue = "116909" )] |
62 | pub const FRAC_PI_8: f128 = 0.392699081698724154807830422909937860524646174921888227621868_f128; |
63 | |
64 | /// 1/π |
65 | #[unstable (feature = "f128" , issue = "116909" )] |
66 | pub const FRAC_1_PI: f128 = 0.318309886183790671537767526745028724068919291480912897495335_f128; |
67 | |
68 | /// 1/sqrt(π) |
69 | #[unstable (feature = "f128" , issue = "116909" )] |
70 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
71 | pub const FRAC_1_SQRT_PI: f128 = |
72 | 0.564189583547756286948079451560772585844050629328998856844086_f128; |
73 | |
74 | /// 1/sqrt(2π) |
#[doc(alias = "FRAC_1_SQRT_TAU")]
#[unstable(feature = "f128", issue = "116909")]
77 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
78 | pub const FRAC_1_SQRT_2PI: f128 = |
79 | 0.398942280401432677939946059934381868475858631164934657665926_f128; |
80 | |
81 | /// 2/π |
82 | #[unstable (feature = "f128" , issue = "116909" )] |
83 | pub const FRAC_2_PI: f128 = 0.636619772367581343075535053490057448137838582961825794990669_f128; |
84 | |
85 | /// 2/sqrt(π) |
86 | #[unstable (feature = "f128" , issue = "116909" )] |
87 | pub const FRAC_2_SQRT_PI: f128 = |
88 | 1.12837916709551257389615890312154517168810125865799771368817_f128; |
89 | |
90 | /// sqrt(2) |
91 | #[unstable (feature = "f128" , issue = "116909" )] |
92 | pub const SQRT_2: f128 = 1.41421356237309504880168872420969807856967187537694807317668_f128; |
93 | |
94 | /// 1/sqrt(2) |
95 | #[unstable (feature = "f128" , issue = "116909" )] |
96 | pub const FRAC_1_SQRT_2: f128 = |
97 | 0.707106781186547524400844362104849039284835937688474036588340_f128; |
98 | |
99 | /// sqrt(3) |
100 | #[unstable (feature = "f128" , issue = "116909" )] |
101 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
102 | pub const SQRT_3: f128 = 1.73205080756887729352744634150587236694280525381038062805581_f128; |
103 | |
104 | /// 1/sqrt(3) |
105 | #[unstable (feature = "f128" , issue = "116909" )] |
106 | // Also, #[unstable(feature = "more_float_constants", issue = "103883")] |
107 | pub const FRAC_1_SQRT_3: f128 = |
108 | 0.577350269189625764509148780501957455647601751270126876018602_f128; |
109 | |
110 | /// Euler's number (e) |
111 | #[unstable (feature = "f128" , issue = "116909" )] |
112 | pub const E: f128 = 2.71828182845904523536028747135266249775724709369995957496697_f128; |
113 | |
114 | /// log<sub>2</sub>(10) |
115 | #[unstable (feature = "f128" , issue = "116909" )] |
116 | pub const LOG2_10: f128 = 3.32192809488736234787031942948939017586483139302458061205476_f128; |
117 | |
118 | /// log<sub>2</sub>(e) |
119 | #[unstable (feature = "f128" , issue = "116909" )] |
120 | pub const LOG2_E: f128 = 1.44269504088896340735992468100189213742664595415298593413545_f128; |
121 | |
122 | /// log<sub>10</sub>(2) |
123 | #[unstable (feature = "f128" , issue = "116909" )] |
124 | pub const LOG10_2: f128 = 0.301029995663981195213738894724493026768189881462108541310427_f128; |
125 | |
126 | /// log<sub>10</sub>(e) |
127 | #[unstable (feature = "f128" , issue = "116909" )] |
128 | pub const LOG10_E: f128 = 0.434294481903251827651128918916605082294397005803666566114454_f128; |
129 | |
130 | /// ln(2) |
131 | #[unstable (feature = "f128" , issue = "116909" )] |
132 | pub const LN_2: f128 = 0.693147180559945309417232121458176568075500134360255254120680_f128; |
133 | |
134 | /// ln(10) |
135 | #[unstable (feature = "f128" , issue = "116909" )] |
136 | pub const LN_10: f128 = 2.30258509299404568401799145468436420760110148862877297603333_f128; |
137 | } |
138 | |
139 | impl f128 { |
140 | // FIXME(f16_f128): almost all methods in this `impl` are missing examples and a const |
141 | // implementation. Add these once we can run code on all platforms and have f16/f128 in CTFE. |
142 | |
143 | /// The radix or base of the internal representation of `f128`. |
144 | #[unstable (feature = "f128" , issue = "116909" )] |
145 | pub const RADIX: u32 = 2; |
146 | |
147 | /// Number of significant digits in base 2. |
148 | /// |
149 | /// Note that the size of the mantissa in the bitwise representation is one |
150 | /// smaller than this since the leading 1 is not stored explicitly. |
151 | #[unstable (feature = "f128" , issue = "116909" )] |
152 | pub const MANTISSA_DIGITS: u32 = 113; |
153 | |
154 | /// Approximate number of significant digits in base 10. |
155 | /// |
156 | /// This is the maximum <i>x</i> such that any decimal number with <i>x</i> |
157 | /// significant digits can be converted to `f128` and back without loss. |
158 | /// |
159 | /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>). |
160 | /// |
161 | /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS |
162 | #[unstable (feature = "f128" , issue = "116909" )] |
163 | pub const DIGITS: u32 = 33; |
164 | |
165 | /// [Machine epsilon] value for `f128`. |
166 | /// |
167 | /// This is the difference between `1.0` and the next larger representable number. |
168 | /// |
169 | /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>. |
170 | /// |
171 | /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon |
172 | /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS |
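///
/// A small sanity check (a sketch; gated like the other runnable examples in this file
/// because `f128` arithmetic and comparisons are not yet reliable on every target):
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `addtf3` and `eqtf2` are available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // EPSILON is the gap between 1.0 and the next representable value above it.
/// assert_eq!(1.0f128 + f128::EPSILON, 1.0f128.next_up());
/// # }
/// ```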
#[unstable(feature = "f128", issue = "116909")]
#[rustc_diagnostic_item = "f128_epsilon"]
175 | pub const EPSILON: f128 = 1.92592994438723585305597794258492732e-34_f128; |
176 | |
177 | /// Smallest finite `f128` value. |
178 | /// |
179 | /// Equal to −[`MAX`]. |
180 | /// |
181 | /// [`MAX`]: f128::MAX |
182 | #[unstable (feature = "f128" , issue = "116909" )] |
183 | pub const MIN: f128 = -1.18973149535723176508575932662800702e+4932_f128; |
184 | /// Smallest positive normal `f128` value. |
185 | /// |
186 | /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>. |
187 | /// |
188 | /// [`MIN_EXP`]: f128::MIN_EXP |
189 | #[unstable (feature = "f128" , issue = "116909" )] |
190 | pub const MIN_POSITIVE: f128 = 3.36210314311209350626267781732175260e-4932_f128; |
191 | /// Largest finite `f128` value. |
192 | /// |
193 | /// Equal to |
194 | /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>. |
195 | /// |
196 | /// [`MANTISSA_DIGITS`]: f128::MANTISSA_DIGITS |
197 | /// [`MAX_EXP`]: f128::MAX_EXP |
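///
/// For example (a sketch, gated in the same way as the other runnable examples in this file):
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2` is available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // `MAX` is the largest finite value; `MIN` is its negation.
/// assert_eq!(f128::MIN, -f128::MAX);
/// assert!(f128::MAX < f128::INFINITY);
/// # }
/// ```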
#[unstable(feature = "f128", issue = "116909")]
199 | pub const MAX: f128 = 1.18973149535723176508575932662800702e+4932_f128; |
200 | |
201 | /// One greater than the minimum possible *normal* power of 2 exponent |
202 | /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition). |
203 | /// |
204 | /// This corresponds to the exact minimum possible *normal* power of 2 exponent |
205 | /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition). |
206 | /// In other words, all normal numbers representable by this type are |
207 | /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>. |
208 | #[unstable (feature = "f128" , issue = "116909" )] |
209 | pub const MIN_EXP: i32 = -16_381; |
210 | /// One greater than the maximum possible power of 2 exponent |
211 | /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition). |
212 | /// |
213 | /// This corresponds to the exact maximum possible power of 2 exponent |
214 | /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition). |
215 | /// In other words, all numbers representable by this type are |
216 | /// strictly less than 2<sup><i>MAX_EXP</i></sup>. |
217 | #[unstable (feature = "f128" , issue = "116909" )] |
218 | pub const MAX_EXP: i32 = 16_384; |
219 | |
220 | /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal. |
221 | /// |
222 | /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]). |
223 | /// |
224 | /// [`MIN_POSITIVE`]: f128::MIN_POSITIVE |
225 | #[unstable (feature = "f128" , issue = "116909" )] |
226 | pub const MIN_10_EXP: i32 = -4_931; |
227 | /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal. |
228 | /// |
229 | /// Equal to floor(log<sub>10</sub> [`MAX`]). |
230 | /// |
231 | /// [`MAX`]: f128::MAX |
232 | #[unstable (feature = "f128" , issue = "116909" )] |
233 | pub const MAX_10_EXP: i32 = 4_932; |
234 | |
235 | /// Not a Number (NaN). |
236 | /// |
237 | /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are |
/// considered to be NaN. Furthermore, the standard distinguishes between a "signaling" and
239 | /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern) |
240 | /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more |
241 | /// info. |
242 | /// |
/// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumption
/// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
245 | /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary. |
246 | /// The concrete bit pattern may change across Rust versions and target platforms. |
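///
/// A brief illustration (a sketch; the comparisons are gated like the other runnable
/// examples because they currently lower to library calls on most targets):
///
/// ```
/// #![feature(f128)]
/// # // FIXME(f16_f128): remove when `eqtf2`/`unordtf2` are available
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let nan = f128::NAN;
/// assert!(nan.is_nan());
/// // A NaN compares unequal to everything, including itself.
/// assert_ne!(nan, nan);
/// # }
/// ```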
#[allow(clippy::eq_op)]
#[rustc_diagnostic_item = "f128_nan"]
#[unstable(feature = "f128", issue = "116909")]
250 | pub const NAN: f128 = 0.0_f128 / 0.0_f128; |
251 | |
252 | /// Infinity (∞). |
253 | #[unstable (feature = "f128" , issue = "116909" )] |
254 | pub const INFINITY: f128 = 1.0_f128 / 0.0_f128; |
255 | |
256 | /// Negative infinity (−∞). |
257 | #[unstable (feature = "f128" , issue = "116909" )] |
258 | pub const NEG_INFINITY: f128 = -1.0_f128 / 0.0_f128; |
259 | |
260 | /// Sign bit |
261 | pub(crate) const SIGN_MASK: u128 = 0x8000_0000_0000_0000_0000_0000_0000_0000; |
262 | |
263 | /// Exponent mask |
264 | pub(crate) const EXP_MASK: u128 = 0x7fff_0000_0000_0000_0000_0000_0000_0000; |
265 | |
266 | /// Mantissa mask |
267 | pub(crate) const MAN_MASK: u128 = 0x0000_ffff_ffff_ffff_ffff_ffff_ffff_ffff; |
268 | |
269 | /// Minimum representable positive value (min subnormal) |
270 | const TINY_BITS: u128 = 0x1; |
271 | |
272 | /// Minimum representable negative value (min negative subnormal) |
273 | const NEG_TINY_BITS: u128 = Self::TINY_BITS | Self::SIGN_MASK; |
274 | |
275 | /// Returns `true` if this value is NaN. |
276 | /// |
277 | /// ``` |
278 | /// #![feature(f128)] |
279 | /// # // FIXME(f16_f128): remove when `unordtf2` is available |
280 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
281 | /// |
282 | /// let nan = f128::NAN; |
283 | /// let f = 7.0_f128; |
284 | /// |
285 | /// assert!(nan.is_nan()); |
286 | /// assert!(!f.is_nan()); |
287 | /// # } |
288 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
#[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
293 | pub const fn is_nan(self) -> bool { |
294 | self != self |
295 | } |
296 | |
297 | /// Returns `true` if this value is positive infinity or negative infinity, and |
298 | /// `false` otherwise. |
299 | /// |
300 | /// ``` |
301 | /// #![feature(f128)] |
302 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
303 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
304 | /// |
305 | /// let f = 7.0f128; |
306 | /// let inf = f128::INFINITY; |
307 | /// let neg_inf = f128::NEG_INFINITY; |
308 | /// let nan = f128::NAN; |
309 | /// |
310 | /// assert!(!f.is_infinite()); |
311 | /// assert!(!nan.is_infinite()); |
312 | /// |
313 | /// assert!(inf.is_infinite()); |
314 | /// assert!(neg_inf.is_infinite()); |
315 | /// # } |
316 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
320 | pub const fn is_infinite(self) -> bool { |
321 | (self == f128::INFINITY) | (self == f128::NEG_INFINITY) |
322 | } |
323 | |
324 | /// Returns `true` if this number is neither infinite nor NaN. |
325 | /// |
326 | /// ``` |
327 | /// #![feature(f128)] |
328 | /// # // FIXME(f16_f128): remove when `lttf2` is available |
329 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
330 | /// |
331 | /// let f = 7.0f128; |
332 | /// let inf: f128 = f128::INFINITY; |
333 | /// let neg_inf: f128 = f128::NEG_INFINITY; |
334 | /// let nan: f128 = f128::NAN; |
335 | /// |
336 | /// assert!(f.is_finite()); |
337 | /// |
338 | /// assert!(!nan.is_finite()); |
339 | /// assert!(!inf.is_finite()); |
340 | /// assert!(!neg_inf.is_finite()); |
341 | /// # } |
342 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
347 | pub const fn is_finite(self) -> bool { |
348 | // There's no need to handle NaN separately: if self is NaN, |
349 | // the comparison is not true, exactly as desired. |
350 | self.abs() < Self::INFINITY |
351 | } |
352 | |
353 | /// Returns `true` if the number is [subnormal]. |
354 | /// |
355 | /// ``` |
356 | /// #![feature(f128)] |
357 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
358 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
359 | /// |
360 | /// let min = f128::MIN_POSITIVE; // 3.362103143e-4932f128 |
361 | /// let max = f128::MAX; |
362 | /// let lower_than_min = 1.0e-4960_f128; |
363 | /// let zero = 0.0_f128; |
364 | /// |
365 | /// assert!(!min.is_subnormal()); |
366 | /// assert!(!max.is_subnormal()); |
367 | /// |
368 | /// assert!(!zero.is_subnormal()); |
369 | /// assert!(!f128::NAN.is_subnormal()); |
370 | /// assert!(!f128::INFINITY.is_subnormal()); |
371 | /// // Values between `0` and `min` are Subnormal. |
372 | /// assert!(lower_than_min.is_subnormal()); |
373 | /// # } |
374 | /// ``` |
375 | /// |
376 | /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
380 | pub const fn is_subnormal(self) -> bool { |
381 | matches!(self.classify(), FpCategory::Subnormal) |
382 | } |
383 | |
/// Returns `true` if the number is neither zero, infinite, [subnormal], nor NaN.
385 | /// |
386 | /// ``` |
387 | /// #![feature(f128)] |
388 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
389 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
390 | /// |
391 | /// let min = f128::MIN_POSITIVE; // 3.362103143e-4932f128 |
392 | /// let max = f128::MAX; |
393 | /// let lower_than_min = 1.0e-4960_f128; |
394 | /// let zero = 0.0_f128; |
395 | /// |
396 | /// assert!(min.is_normal()); |
397 | /// assert!(max.is_normal()); |
398 | /// |
399 | /// assert!(!zero.is_normal()); |
400 | /// assert!(!f128::NAN.is_normal()); |
401 | /// assert!(!f128::INFINITY.is_normal()); |
402 | /// // Values between `0` and `min` are Subnormal. |
403 | /// assert!(!lower_than_min.is_normal()); |
404 | /// # } |
405 | /// ``` |
406 | /// |
407 | /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
411 | pub const fn is_normal(self) -> bool { |
412 | matches!(self.classify(), FpCategory::Normal) |
413 | } |
414 | |
415 | /// Returns the floating point category of the number. If only one property |
416 | /// is going to be tested, it is generally faster to use the specific |
417 | /// predicate instead. |
418 | /// |
419 | /// ``` |
420 | /// #![feature(f128)] |
421 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
422 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
423 | /// |
424 | /// use std::num::FpCategory; |
425 | /// |
426 | /// let num = 12.4_f128; |
427 | /// let inf = f128::INFINITY; |
428 | /// |
429 | /// assert_eq!(num.classify(), FpCategory::Normal); |
430 | /// assert_eq!(inf.classify(), FpCategory::Infinite); |
431 | /// # } |
432 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
435 | pub const fn classify(self) -> FpCategory { |
436 | let bits = self.to_bits(); |
437 | match (bits & Self::MAN_MASK, bits & Self::EXP_MASK) { |
438 | (0, Self::EXP_MASK) => FpCategory::Infinite, |
439 | (_, Self::EXP_MASK) => FpCategory::Nan, |
440 | (0, 0) => FpCategory::Zero, |
441 | (_, 0) => FpCategory::Subnormal, |
442 | _ => FpCategory::Normal, |
443 | } |
444 | } |
445 | |
446 | /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with |
447 | /// positive sign bit and positive infinity. |
448 | /// |
449 | /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of |
/// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
451 | /// conserved over arithmetic operations, the result of `is_sign_positive` on |
452 | /// a NaN might produce an unexpected or non-portable result. See the [specification |
453 | /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0` |
454 | /// if you need fully portable behavior (will return `false` for all NaNs). |
455 | /// |
456 | /// ``` |
457 | /// #![feature(f128)] |
458 | /// |
459 | /// let f = 7.0_f128; |
460 | /// let g = -7.0_f128; |
461 | /// |
462 | /// assert!(f.is_sign_positive()); |
463 | /// assert!(!g.is_sign_positive()); |
464 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
468 | pub const fn is_sign_positive(self) -> bool { |
469 | !self.is_sign_negative() |
470 | } |
471 | |
472 | /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with |
473 | /// negative sign bit and negative infinity. |
474 | /// |
475 | /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of |
/// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
477 | /// conserved over arithmetic operations, the result of `is_sign_negative` on |
478 | /// a NaN might produce an unexpected or non-portable result. See the [specification |
479 | /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0` |
480 | /// if you need fully portable behavior (will return `false` for all NaNs). |
481 | /// |
482 | /// ``` |
483 | /// #![feature(f128)] |
484 | /// |
485 | /// let f = 7.0_f128; |
486 | /// let g = -7.0_f128; |
487 | /// |
488 | /// assert!(!f.is_sign_negative()); |
489 | /// assert!(g.is_sign_negative()); |
490 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
494 | pub const fn is_sign_negative(self) -> bool { |
495 | // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus |
496 | // applies to zeros and NaNs as well. |
497 | // SAFETY: This is just transmuting to get the sign bit, it's fine. |
498 | (self.to_bits() & (1 << 127)) != 0 |
499 | } |
500 | |
501 | /// Returns the least number greater than `self`. |
502 | /// |
503 | /// Let `TINY` be the smallest representable positive `f128`. Then, |
504 | /// - if `self.is_nan()`, this returns `self`; |
505 | /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`]; |
506 | /// - if `self` is `-TINY`, this returns -0.0; |
507 | /// - if `self` is -0.0 or +0.0, this returns `TINY`; |
508 | /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`]; |
509 | /// - otherwise the unique least value greater than `self` is returned. |
510 | /// |
511 | /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x` |
512 | /// is finite `x == x.next_up().next_down()` also holds. |
513 | /// |
514 | /// ```rust |
515 | /// #![feature(f128)] |
516 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
517 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
518 | /// |
519 | /// // f128::EPSILON is the difference between 1.0 and the next number up. |
520 | /// assert_eq!(1.0f128.next_up(), 1.0 + f128::EPSILON); |
521 | /// // But not for most numbers. |
522 | /// assert!(0.1f128.next_up() < 0.1 + f128::EPSILON); |
523 | /// assert_eq!(4611686018427387904f128.next_up(), 4611686018427387904.000000000000001); |
524 | /// # } |
525 | /// ``` |
526 | /// |
527 | /// This operation corresponds to IEEE-754 `nextUp`. |
528 | /// |
529 | /// [`NEG_INFINITY`]: Self::NEG_INFINITY |
530 | /// [`INFINITY`]: Self::INFINITY |
531 | /// [`MIN`]: Self::MIN |
532 | /// [`MAX`]: Self::MAX |
#[inline]
#[doc(alias = "nextUp")]
#[unstable(feature = "f128", issue = "116909")]
536 | pub const fn next_up(self) -> Self { |
537 | // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing |
538 | // denormals to zero. This is in general unsound and unsupported, but here |
539 | // we do our best to still produce the correct result on such targets. |
540 | let bits = self.to_bits(); |
541 | if self.is_nan() || bits == Self::INFINITY.to_bits() { |
542 | return self; |
543 | } |
544 | |
545 | let abs = bits & !Self::SIGN_MASK; |
546 | let next_bits = if abs == 0 { |
547 | Self::TINY_BITS |
548 | } else if bits == abs { |
549 | bits + 1 |
550 | } else { |
551 | bits - 1 |
552 | }; |
553 | Self::from_bits(next_bits) |
554 | } |
555 | |
556 | /// Returns the greatest number less than `self`. |
557 | /// |
558 | /// Let `TINY` be the smallest representable positive `f128`. Then, |
559 | /// - if `self.is_nan()`, this returns `self`; |
560 | /// - if `self` is [`INFINITY`], this returns [`MAX`]; |
561 | /// - if `self` is `TINY`, this returns 0.0; |
562 | /// - if `self` is -0.0 or +0.0, this returns `-TINY`; |
563 | /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`]; |
564 | /// - otherwise the unique greatest value less than `self` is returned. |
565 | /// |
566 | /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x` |
567 | /// is finite `x == x.next_down().next_up()` also holds. |
568 | /// |
569 | /// ```rust |
570 | /// #![feature(f128)] |
571 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
572 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
573 | /// |
574 | /// let x = 1.0f128; |
575 | /// // Clamp value into range [0, 1). |
576 | /// let clamped = x.clamp(0.0, 1.0f128.next_down()); |
577 | /// assert!(clamped < 1.0); |
578 | /// assert_eq!(clamped.next_up(), 1.0); |
579 | /// # } |
580 | /// ``` |
581 | /// |
582 | /// This operation corresponds to IEEE-754 `nextDown`. |
583 | /// |
584 | /// [`NEG_INFINITY`]: Self::NEG_INFINITY |
585 | /// [`INFINITY`]: Self::INFINITY |
586 | /// [`MIN`]: Self::MIN |
587 | /// [`MAX`]: Self::MAX |
#[inline]
#[doc(alias = "nextDown")]
#[unstable(feature = "f128", issue = "116909")]
591 | pub const fn next_down(self) -> Self { |
592 | // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing |
593 | // denormals to zero. This is in general unsound and unsupported, but here |
594 | // we do our best to still produce the correct result on such targets. |
595 | let bits = self.to_bits(); |
596 | if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() { |
597 | return self; |
598 | } |
599 | |
600 | let abs = bits & !Self::SIGN_MASK; |
601 | let next_bits = if abs == 0 { |
602 | Self::NEG_TINY_BITS |
603 | } else if bits == abs { |
604 | bits - 1 |
605 | } else { |
606 | bits + 1 |
607 | }; |
608 | Self::from_bits(next_bits) |
609 | } |
610 | |
611 | /// Takes the reciprocal (inverse) of a number, `1/x`. |
612 | /// |
613 | /// ``` |
614 | /// #![feature(f128)] |
615 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
616 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
617 | /// |
618 | /// let x = 2.0_f128; |
619 | /// let abs_difference = (x.recip() - (1.0 / x)).abs(); |
620 | /// |
621 | /// assert!(abs_difference <= f128::EPSILON); |
622 | /// # } |
623 | /// ``` |
624 | #[inline ] |
625 | #[unstable (feature = "f128" , issue = "116909" )] |
626 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
627 | pub const fn recip(self) -> Self { |
628 | 1.0 / self |
629 | } |
630 | |
631 | /// Converts radians to degrees. |
632 | /// |
633 | /// ``` |
634 | /// #![feature(f128)] |
635 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
636 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
637 | /// |
638 | /// let angle = std::f128::consts::PI; |
639 | /// |
640 | /// let abs_difference = (angle.to_degrees() - 180.0).abs(); |
641 | /// assert!(abs_difference <= f128::EPSILON); |
642 | /// # } |
643 | /// ``` |
644 | #[inline ] |
645 | #[unstable (feature = "f128" , issue = "116909" )] |
646 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
647 | pub const fn to_degrees(self) -> Self { |
648 | // Use a literal for better precision. |
649 | const PIS_IN_180: f128 = 57.2957795130823208767981548141051703324054724665643215491602_f128; |
650 | self * PIS_IN_180 |
651 | } |
652 | |
653 | /// Converts degrees to radians. |
654 | /// |
655 | /// ``` |
656 | /// #![feature(f128)] |
657 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
658 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
659 | /// |
660 | /// let angle = 180.0f128; |
661 | /// |
662 | /// let abs_difference = (angle.to_radians() - std::f128::consts::PI).abs(); |
663 | /// |
664 | /// assert!(abs_difference <= 1e-30); |
665 | /// # } |
666 | /// ``` |
667 | #[inline ] |
668 | #[unstable (feature = "f128" , issue = "116909" )] |
669 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
670 | pub const fn to_radians(self) -> f128 { |
671 | // Use a literal for better precision. |
672 | const RADS_PER_DEG: f128 = |
673 | 0.0174532925199432957692369076848861271344287188854172545609719_f128; |
674 | self * RADS_PER_DEG |
675 | } |
676 | |
677 | /// Returns the maximum of the two numbers, ignoring NaN. |
678 | /// |
679 | /// If one of the arguments is NaN, then the other argument is returned. |
680 | /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs; |
681 | /// this function handles all NaNs the same way and avoids maxNum's problems with associativity. |
682 | /// This also matches the behavior of libm’s fmax. In particular, if the inputs compare equal |
683 | /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically. |
684 | /// |
685 | /// ``` |
686 | /// #![feature(f128)] |
687 | /// # // Using aarch64 because `reliable_f128_math` is needed |
688 | /// # #[cfg (all(target_arch = "aarch64" , target_os = "linux" ))] { |
689 | /// |
690 | /// let x = 1.0f128; |
691 | /// let y = 2.0f128; |
692 | /// |
693 | /// assert_eq!(x.max(y), y); |
694 | /// # } |
695 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the comparison, without modifying either input"]
700 | pub const fn max(self, other: f128) -> f128 { |
701 | intrinsics::maxnumf128(self, other) |
702 | } |
703 | |
704 | /// Returns the minimum of the two numbers, ignoring NaN. |
705 | /// |
706 | /// If one of the arguments is NaN, then the other argument is returned. |
707 | /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs; |
708 | /// this function handles all NaNs the same way and avoids minNum's problems with associativity. |
709 | /// This also matches the behavior of libm’s fmin. In particular, if the inputs compare equal |
710 | /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically. |
711 | /// |
712 | /// ``` |
713 | /// #![feature(f128)] |
714 | /// # // Using aarch64 because `reliable_f128_math` is needed |
715 | /// # #[cfg (all(target_arch = "aarch64" , target_os = "linux" ))] { |
716 | /// |
717 | /// let x = 1.0f128; |
718 | /// let y = 2.0f128; |
719 | /// |
720 | /// assert_eq!(x.min(y), x); |
721 | /// # } |
722 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the comparison, without modifying either input"]
727 | pub const fn min(self, other: f128) -> f128 { |
728 | intrinsics::minnumf128(self, other) |
729 | } |
730 | |
731 | /// Returns the maximum of the two numbers, propagating NaN. |
732 | /// |
733 | /// This returns NaN when *either* argument is NaN, as opposed to |
734 | /// [`f128::max`] which only returns NaN when *both* arguments are NaN. |
735 | /// |
736 | /// ``` |
737 | /// #![feature(f128)] |
738 | /// #![feature(float_minimum_maximum)] |
739 | /// # // Using aarch64 because `reliable_f128_math` is needed |
740 | /// # #[cfg (all(target_arch = "aarch64" , target_os = "linux" ))] { |
741 | /// |
742 | /// let x = 1.0f128; |
743 | /// let y = 2.0f128; |
744 | /// |
745 | /// assert_eq!(x.maximum(y), y); |
746 | /// assert!(x.maximum(f128::NAN).is_nan()); |
747 | /// # } |
748 | /// ``` |
749 | /// |
750 | /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater |
751 | /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0. |
752 | /// Note that this follows the semantics specified in IEEE 754-2019. |
753 | /// |
754 | /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN |
755 | /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info. |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
// #[unstable(feature = "float_minimum_maximum", issue = "91079")]
#[must_use = "this returns the result of the comparison, without modifying either input"]
760 | pub const fn maximum(self, other: f128) -> f128 { |
761 | intrinsics::maximumf128(self, other) |
762 | } |
763 | |
764 | /// Returns the minimum of the two numbers, propagating NaN. |
765 | /// |
766 | /// This returns NaN when *either* argument is NaN, as opposed to |
767 | /// [`f128::min`] which only returns NaN when *both* arguments are NaN. |
768 | /// |
769 | /// ``` |
770 | /// #![feature(f128)] |
771 | /// #![feature(float_minimum_maximum)] |
772 | /// # // Using aarch64 because `reliable_f128_math` is needed |
773 | /// # #[cfg (all(target_arch = "aarch64" , target_os = "linux" ))] { |
774 | /// |
775 | /// let x = 1.0f128; |
776 | /// let y = 2.0f128; |
777 | /// |
778 | /// assert_eq!(x.minimum(y), x); |
779 | /// assert!(x.minimum(f128::NAN).is_nan()); |
780 | /// # } |
781 | /// ``` |
782 | /// |
783 | /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser |
784 | /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0. |
785 | /// Note that this follows the semantics specified in IEEE 754-2019. |
786 | /// |
787 | /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN |
788 | /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info. |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
// #[unstable(feature = "float_minimum_maximum", issue = "91079")]
#[must_use = "this returns the result of the comparison, without modifying either input"]
793 | pub const fn minimum(self, other: f128) -> f128 { |
794 | intrinsics::minimumf128(self, other) |
795 | } |
796 | |
797 | /// Calculates the midpoint (average) between `self` and `rhs`. |
798 | /// |
799 | /// This returns NaN when *either* argument is NaN or if a combination of |
800 | /// +inf and -inf is provided as arguments. |
801 | /// |
802 | /// # Examples |
803 | /// |
804 | /// ``` |
805 | /// #![feature(f128)] |
806 | /// # // Using aarch64 because `reliable_f128_math` is needed |
807 | /// # #[cfg (all(target_arch = "aarch64" , target_os = "linux" ))] { |
808 | /// |
809 | /// assert_eq!(1f128.midpoint(4.0), 2.5); |
810 | /// assert_eq!((-5.5f128).midpoint(8.0), 1.25); |
811 | /// # } |
812 | /// ``` |
#[inline]
#[doc(alias = "average")]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
817 | pub const fn midpoint(self, other: f128) -> f128 { |
818 | const LO: f128 = f128::MIN_POSITIVE * 2.; |
819 | const HI: f128 = f128::MAX / 2.; |
820 | |
821 | let (a, b) = (self, other); |
822 | let abs_a = a.abs(); |
823 | let abs_b = b.abs(); |
824 | |
825 | if abs_a <= HI && abs_b <= HI { |
826 | // Overflow is impossible |
827 | (a + b) / 2. |
828 | } else if abs_a < LO { |
829 | // Not safe to halve `a` (would underflow) |
830 | a + (b / 2.) |
831 | } else if abs_b < LO { |
832 | // Not safe to halve `b` (would underflow) |
833 | (a / 2.) + b |
834 | } else { |
835 | // Safe to halve `a` and `b` |
836 | (a / 2.) + (b / 2.) |
837 | } |
838 | } |
839 | |
840 | /// Rounds toward zero and converts to any primitive integer type, |
841 | /// assuming that the value is finite and fits in that type. |
842 | /// |
843 | /// ``` |
844 | /// #![feature(f128)] |
845 | /// # // FIXME(f16_f128): remove when `float*itf` is available |
846 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
847 | /// |
848 | /// let value = 4.6_f128; |
849 | /// let rounded = unsafe { value.to_int_unchecked::<u16>() }; |
850 | /// assert_eq!(rounded, 4); |
851 | /// |
852 | /// let value = -128.9_f128; |
853 | /// let rounded = unsafe { value.to_int_unchecked::<i8>() }; |
854 | /// assert_eq!(rounded, i8::MIN); |
855 | /// # } |
856 | /// ``` |
857 | /// |
858 | /// # Safety |
859 | /// |
860 | /// The value must: |
861 | /// |
862 | /// * Not be `NaN` |
863 | /// * Not be infinite |
864 | /// * Be representable in the return type `Int`, after truncating off its fractional part |
865 | #[inline ] |
866 | #[unstable (feature = "f128" , issue = "116909" )] |
867 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
868 | pub unsafe fn to_int_unchecked<Int>(self) -> Int |
869 | where |
870 | Self: FloatToInt<Int>, |
871 | { |
872 | // SAFETY: the caller must uphold the safety contract for |
873 | // `FloatToInt::to_int_unchecked`. |
874 | unsafe { FloatToInt::<Int>::to_int_unchecked(self) } |
875 | } |
876 | |
877 | /// Raw transmutation to `u128`. |
878 | /// |
879 | /// This is currently identical to `transmute::<f128, u128>(self)` on all platforms. |
880 | /// |
881 | /// See [`from_bits`](#method.from_bits) for some discussion of the |
882 | /// portability of this operation (there are almost no issues). |
883 | /// |
884 | /// Note that this function is distinct from `as` casting, which attempts to |
885 | /// preserve the *numeric* value, and not the bitwise value. |
886 | /// |
887 | /// ``` |
888 | /// #![feature(f128)] |
889 | /// |
890 | /// # // FIXME(f16_f128): enable this once const casting works |
891 | /// # // assert_ne!((1f128).to_bits(), 1f128 as u128); // to_bits() is not casting! |
892 | /// assert_eq!((12.5f128).to_bits(), 0x40029000000000000000000000000000); |
893 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "this returns the result of the operation, without modifying the original"]
#[allow(unnecessary_transmutes)]
898 | pub const fn to_bits(self) -> u128 { |
899 | // SAFETY: `u128` is a plain old datatype so we can always transmute to it. |
900 | unsafe { mem::transmute(self) } |
901 | } |
902 | |
903 | /// Raw transmutation from `u128`. |
904 | /// |
905 | /// This is currently identical to `transmute::<u128, f128>(v)` on all platforms. |
906 | /// It turns out this is incredibly portable, for two reasons: |
907 | /// |
908 | /// * Floats and Ints have the same endianness on all supported platforms. |
909 | /// * IEEE 754 very precisely specifies the bit layout of floats. |
910 | /// |
911 | /// However there is one caveat: prior to the 2008 version of IEEE 754, how |
912 | /// to interpret the NaN signaling bit wasn't actually specified. Most platforms |
913 | /// (notably x86 and ARM) picked the interpretation that was ultimately |
914 | /// standardized in 2008, but some didn't (notably MIPS). As a result, all |
915 | /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa. |
916 | /// |
917 | /// Rather than trying to preserve signaling-ness cross-platform, this |
918 | /// implementation favors preserving the exact bits. This means that |
919 | /// any payloads encoded in NaNs will be preserved even if the result of |
920 | /// this method is sent over the network from an x86 machine to a MIPS one. |
921 | /// |
922 | /// If the results of this method are only manipulated by the same |
923 | /// architecture that produced them, then there is no portability concern. |
924 | /// |
925 | /// If the input isn't NaN, then there is no portability concern. |
926 | /// |
927 | /// If you don't care about signalingness (very likely), then there is no |
928 | /// portability concern. |
929 | /// |
930 | /// Note that this function is distinct from `as` casting, which attempts to |
931 | /// preserve the *numeric* value, and not the bitwise value. |
932 | /// |
933 | /// ``` |
934 | /// #![feature(f128)] |
935 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
936 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
937 | /// |
938 | /// let v = f128::from_bits(0x40029000000000000000000000000000); |
939 | /// assert_eq!(v, 12.5); |
940 | /// # } |
941 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
#[allow(unnecessary_transmutes)]
946 | pub const fn from_bits(v: u128) -> Self { |
947 | // It turns out the safety issues with sNaN were overblown! Hooray! |
948 | // SAFETY: `u128` is a plain old datatype so we can always transmute from it. |
949 | unsafe { mem::transmute(v) } |
950 | } |
951 | |
952 | /// Returns the memory representation of this floating point number as a byte array in |
953 | /// big-endian (network) byte order. |
954 | /// |
955 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
956 | /// portability of this operation (there are almost no issues). |
957 | /// |
958 | /// # Examples |
959 | /// |
960 | /// ``` |
961 | /// #![feature(f128)] |
962 | /// |
963 | /// let bytes = 12.5f128.to_be_bytes(); |
964 | /// assert_eq!( |
965 | /// bytes, |
966 | /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, |
967 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] |
968 | /// ); |
969 | /// ``` |
970 | #[inline ] |
971 | #[unstable (feature = "f128" , issue = "116909" )] |
972 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
973 | pub const fn to_be_bytes(self) -> [u8; 16] { |
974 | self.to_bits().to_be_bytes() |
975 | } |
976 | |
977 | /// Returns the memory representation of this floating point number as a byte array in |
978 | /// little-endian byte order. |
979 | /// |
980 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
981 | /// portability of this operation (there are almost no issues). |
982 | /// |
983 | /// # Examples |
984 | /// |
985 | /// ``` |
986 | /// #![feature(f128)] |
987 | /// |
988 | /// let bytes = 12.5f128.to_le_bytes(); |
989 | /// assert_eq!( |
990 | /// bytes, |
991 | /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
992 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40] |
993 | /// ); |
994 | /// ``` |
995 | #[inline ] |
996 | #[unstable (feature = "f128" , issue = "116909" )] |
997 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
998 | pub const fn to_le_bytes(self) -> [u8; 16] { |
999 | self.to_bits().to_le_bytes() |
1000 | } |
1001 | |
1002 | /// Returns the memory representation of this floating point number as a byte array in |
1003 | /// native byte order. |
1004 | /// |
1005 | /// As the target platform's native endianness is used, portable code |
1006 | /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead. |
1007 | /// |
1008 | /// [`to_be_bytes`]: f128::to_be_bytes |
1009 | /// [`to_le_bytes`]: f128::to_le_bytes |
1010 | /// |
1011 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1012 | /// portability of this operation (there are almost no issues). |
1013 | /// |
1014 | /// # Examples |
1015 | /// |
1016 | /// ``` |
1017 | /// #![feature(f128)] |
1018 | /// |
1019 | /// let bytes = 12.5f128.to_ne_bytes(); |
1020 | /// assert_eq!( |
1021 | /// bytes, |
/// if cfg!(target_endian = "big") {
1023 | /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, |
1024 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] |
1025 | /// } else { |
1026 | /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
1027 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40] |
1028 | /// } |
1029 | /// ); |
1030 | /// ``` |
1031 | #[inline ] |
1032 | #[unstable (feature = "f128" , issue = "116909" )] |
1033 | #[must_use = "this returns the result of the operation, without modifying the original" ] |
1034 | pub const fn to_ne_bytes(self) -> [u8; 16] { |
1035 | self.to_bits().to_ne_bytes() |
1036 | } |
1037 | |
1038 | /// Creates a floating point value from its representation as a byte array in big endian. |
1039 | /// |
1040 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1041 | /// portability of this operation (there are almost no issues). |
1042 | /// |
1043 | /// # Examples |
1044 | /// |
1045 | /// ``` |
1046 | /// #![feature(f128)] |
1047 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
1048 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1049 | /// |
1050 | /// let value = f128::from_be_bytes( |
1051 | /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, |
1052 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] |
1053 | /// ); |
1054 | /// assert_eq!(value, 12.5); |
1055 | /// # } |
1056 | /// ``` |
1057 | #[inline ] |
1058 | #[must_use ] |
1059 | #[unstable (feature = "f128" , issue = "116909" )] |
1060 | pub const fn from_be_bytes(bytes: [u8; 16]) -> Self { |
1061 | Self::from_bits(u128::from_be_bytes(bytes)) |
1062 | } |
1063 | |
1064 | /// Creates a floating point value from its representation as a byte array in little endian. |
1065 | /// |
1066 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1067 | /// portability of this operation (there are almost no issues). |
1068 | /// |
1069 | /// # Examples |
1070 | /// |
1071 | /// ``` |
1072 | /// #![feature(f128)] |
1073 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
1074 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1075 | /// |
1076 | /// let value = f128::from_le_bytes( |
1077 | /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
1078 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40] |
1079 | /// ); |
1080 | /// assert_eq!(value, 12.5); |
1081 | /// # } |
1082 | /// ``` |
1083 | #[inline ] |
1084 | #[must_use ] |
1085 | #[unstable (feature = "f128" , issue = "116909" )] |
1086 | pub const fn from_le_bytes(bytes: [u8; 16]) -> Self { |
1087 | Self::from_bits(u128::from_le_bytes(bytes)) |
1088 | } |
1089 | |
1090 | /// Creates a floating point value from its representation as a byte array in native endian. |
1091 | /// |
1092 | /// As the target platform's native endianness is used, portable code |
1093 | /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as |
1094 | /// appropriate instead. |
1095 | /// |
1096 | /// [`from_be_bytes`]: f128::from_be_bytes |
1097 | /// [`from_le_bytes`]: f128::from_le_bytes |
1098 | /// |
1099 | /// See [`from_bits`](Self::from_bits) for some discussion of the |
1100 | /// portability of this operation (there are almost no issues). |
1101 | /// |
1102 | /// # Examples |
1103 | /// |
1104 | /// ``` |
1105 | /// #![feature(f128)] |
1106 | /// # // FIXME(f16_f128): remove when `eqtf2` is available |
1107 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1108 | /// |
/// let value = f128::from_ne_bytes(if cfg!(target_endian = "big") {
1110 | /// [0x40, 0x02, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, |
1111 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] |
1112 | /// } else { |
1113 | /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
1114 | /// 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x02, 0x40] |
1115 | /// }); |
1116 | /// assert_eq!(value, 12.5); |
1117 | /// # } |
1118 | /// ``` |
1119 | #[inline ] |
1120 | #[must_use ] |
1121 | #[unstable (feature = "f128" , issue = "116909" )] |
1122 | pub const fn from_ne_bytes(bytes: [u8; 16]) -> Self { |
1123 | Self::from_bits(u128::from_ne_bytes(bytes)) |
1124 | } |
1125 | |
1126 | /// Returns the ordering between `self` and `other`. |
1127 | /// |
1128 | /// Unlike the standard partial comparison between floating point numbers, |
1129 | /// this comparison always produces an ordering in accordance to |
1130 | /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision) |
1131 | /// floating point standard. The values are ordered in the following sequence: |
1132 | /// |
1133 | /// - negative quiet NaN |
1134 | /// - negative signaling NaN |
1135 | /// - negative infinity |
1136 | /// - negative numbers |
1137 | /// - negative subnormal numbers |
1138 | /// - negative zero |
1139 | /// - positive zero |
1140 | /// - positive subnormal numbers |
1141 | /// - positive numbers |
1142 | /// - positive infinity |
1143 | /// - positive signaling NaN |
1144 | /// - positive quiet NaN. |
1145 | /// |
1146 | /// The ordering established by this function does not always agree with the |
1147 | /// [`PartialOrd`] and [`PartialEq`] implementations of `f128`. For example, |
1148 | /// they consider negative and positive zero equal, while `total_cmp` |
1149 | /// doesn't. |
1150 | /// |
1151 | /// The interpretation of the signaling NaN bit follows the definition in |
1152 | /// the IEEE 754 standard, which may not match the interpretation by some of |
1153 | /// the older, non-conformant (e.g. MIPS) hardware implementations. |
1154 | /// |
1155 | /// # Example |
1156 | /// |
1157 | /// ``` |
1158 | /// #![feature(f128)] |
1159 | /// |
1160 | /// struct GoodBoy { |
1161 | /// name: &'static str, |
1162 | /// weight: f128, |
1163 | /// } |
1164 | /// |
1165 | /// let mut bois = vec![ |
/// GoodBoy { name: "Pucci", weight: 0.1 },
/// GoodBoy { name: "Woofer", weight: 99.0 },
/// GoodBoy { name: "Yapper", weight: 10.0 },
/// GoodBoy { name: "Chonk", weight: f128::INFINITY },
/// GoodBoy { name: "Abs. Unit", weight: f128::NAN },
/// GoodBoy { name: "Floaty", weight: -5.0 },
1172 | /// ]; |
1173 | /// |
1174 | /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight)); |
1175 | /// |
1176 | /// // `f128::NAN` could be positive or negative, which will affect the sort order. |
1177 | /// if f128::NAN.is_sign_negative() { |
1178 | /// bois.into_iter().map(|b| b.weight) |
1179 | /// .zip([f128::NAN, -5.0, 0.1, 10.0, 99.0, f128::INFINITY].iter()) |
1180 | /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits())) |
1181 | /// } else { |
1182 | /// bois.into_iter().map(|b| b.weight) |
1183 | /// .zip([-5.0, 0.1, 10.0, 99.0, f128::INFINITY, f128::NAN].iter()) |
1184 | /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits())) |
1185 | /// } |
1186 | /// ``` |
#[inline]
#[must_use]
#[unstable(feature = "f128", issue = "116909")]
1190 | pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering { |
1191 | let mut left = self.to_bits() as i128; |
1192 | let mut right = other.to_bits() as i128; |
1193 | |
1194 | // In case of negatives, flip all the bits except the sign |
1195 | // to achieve a similar layout as two's complement integers |
1196 | // |
1197 | // Why does this work? IEEE 754 floats consist of three fields: |
1198 | // Sign bit, exponent and mantissa. The set of exponent and mantissa |
1199 | // fields as a whole have the property that their bitwise order is |
1200 | // equal to the numeric magnitude where the magnitude is defined. |
1201 | // The magnitude is not normally defined on NaN values, but |
1202 | // IEEE 754 totalOrder defines the NaN values also to follow the |
1203 | // bitwise order. This leads to order explained in the doc comment. |
1204 | // However, the representation of magnitude is the same for negative |
1205 | // and positive numbers – only the sign bit is different. |
1206 | // To easily compare the floats as signed integers, we need to |
1207 | // flip the exponent and mantissa bits in case of negative numbers. |
1208 | // We effectively convert the numbers to "two's complement" form. |
1209 | // |
1210 | // To do the flipping, we construct a mask and XOR against it. |
1211 | // We branchlessly calculate an "all-ones except for the sign bit" |
1212 | // mask from negative-signed values: right shifting sign-extends |
1213 | // the integer, so we "fill" the mask with sign bits, and then |
1214 | // convert to unsigned to push one more zero bit. |
1215 | // On positive values, the mask is all zeros, so it's a no-op. |
1216 | left ^= (((left >> 127) as u128) >> 1) as i128; |
1217 | right ^= (((right >> 127) as u128) >> 1) as i128; |
1218 | |
1219 | left.cmp(&right) |
1220 | } |
1221 | |
1222 | /// Restrict a value to a certain interval unless it is NaN. |
1223 | /// |
1224 | /// Returns `max` if `self` is greater than `max`, and `min` if `self` is |
1225 | /// less than `min`. Otherwise this returns `self`. |
1226 | /// |
1227 | /// Note that this function returns NaN if the initial value was NaN as |
1228 | /// well. |
1229 | /// |
1230 | /// # Panics |
1231 | /// |
1232 | /// Panics if `min > max`, `min` is NaN, or `max` is NaN. |
1233 | /// |
1234 | /// # Examples |
1235 | /// |
1236 | /// ``` |
1237 | /// #![feature(f128)] |
1238 | /// # // FIXME(f16_f128): remove when `{eq,gt,unord}tf` are available |
1239 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1240 | /// |
1241 | /// assert!((-3.0f128).clamp(-2.0, 1.0) == -2.0); |
1242 | /// assert!((0.0f128).clamp(-2.0, 1.0) == 0.0); |
1243 | /// assert!((2.0f128).clamp(-2.0, 1.0) == 1.0); |
1244 | /// assert!((f128::NAN).clamp(-2.0, 1.0).is_nan()); |
1245 | /// # } |
1246 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
1250 | pub const fn clamp(mut self, min: f128, max: f128) -> f128 { |
1251 | const_assert!( |
1252 | min <= max, |
1253 | "min > max, or either was NaN" , |
1254 | "min > max, or either was NaN. min = {min:?}, max = {max:?}" , |
1255 | min: f128, |
1256 | max: f128, |
1257 | ); |
1258 | |
1259 | if self < min { |
1260 | self = min; |
1261 | } |
1262 | if self > max { |
1263 | self = max; |
1264 | } |
1265 | self |
1266 | } |
1267 | |
1268 | /// Computes the absolute value of `self`. |
1269 | /// |
1270 | /// This function always returns the precise result. |
1271 | /// |
1272 | /// # Examples |
1273 | /// |
1274 | /// ``` |
1275 | /// #![feature(f128)] |
1276 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1277 | /// |
1278 | /// let x = 3.5_f128; |
1279 | /// let y = -3.5_f128; |
1280 | /// |
1281 | /// assert_eq!(x.abs(), x); |
1282 | /// assert_eq!(y.abs(), -y); |
1283 | /// |
1284 | /// assert!(f128::NAN.abs().is_nan()); |
1285 | /// # } |
1286 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
1291 | pub const fn abs(self) -> Self { |
1292 | // FIXME(f16_f128): replace with `intrinsics::fabsf128` when available |
1293 | // We don't do this now because LLVM has lowering bugs for f128 math. |
1294 | Self::from_bits(self.to_bits() & !(1 << 127)) |
1295 | } |
1296 | |
1297 | /// Returns a number that represents the sign of `self`. |
1298 | /// |
1299 | /// - `1.0` if the number is positive, `+0.0` or `INFINITY` |
1300 | /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY` |
1301 | /// - NaN if the number is NaN |
1302 | /// |
1303 | /// # Examples |
1304 | /// |
1305 | /// ``` |
1306 | /// #![feature(f128)] |
1307 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1308 | /// |
1309 | /// let f = 3.5_f128; |
1310 | /// |
1311 | /// assert_eq!(f.signum(), 1.0); |
1312 | /// assert_eq!(f128::NEG_INFINITY.signum(), -1.0); |
1313 | /// |
1314 | /// assert!(f128::NAN.signum().is_nan()); |
1315 | /// # } |
1316 | /// ``` |
#[inline]
#[unstable(feature = "f128", issue = "116909")]
#[rustc_const_unstable(feature = "f128", issue = "116909")]
#[must_use = "method returns a new number and does not mutate the original value"]
1321 | pub const fn signum(self) -> f128 { |
1322 | if self.is_nan() { Self::NAN } else { 1.0_f128.copysign(self) } |
1323 | } |
1324 | |
1325 | /// Returns a number composed of the magnitude of `self` and the sign of |
1326 | /// `sign`. |
1327 | /// |
1328 | /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`. |
1329 | /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is |
1330 | /// returned. |
1331 | /// |
1332 | /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note |
1333 | /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust |
/// doesn't guarantee that the bit patterns of NaNs are conserved over arithmetic operations, the
1335 | /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable |
1336 | /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more |
1337 | /// info. |
1338 | /// |
1339 | /// # Examples |
1340 | /// |
1341 | /// ``` |
1342 | /// #![feature(f128)] |
1343 | /// # #[cfg (all(target_arch = "x86_64" , target_os = "linux" ))] { |
1344 | /// |
1345 | /// let f = 3.5_f128; |
1346 | /// |
1347 | /// assert_eq!(f.copysign(0.42), 3.5_f128); |
1348 | /// assert_eq!(f.copysign(-0.42), -3.5_f128); |
1349 | /// assert_eq!((-f).copysign(0.42), 3.5_f128); |
1350 | /// assert_eq!((-f).copysign(-0.42), -3.5_f128); |
1351 | /// |
1352 | /// assert!(f128::NAN.copysign(1.0).is_nan()); |
1353 | /// # } |
1354 | /// ``` |
1355 | #[inline ] |
1356 | #[unstable (feature = "f128" , issue = "116909" )] |
1357 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1358 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1359 | pub const fn copysign(self, sign: f128) -> f128 { |
1360 | // SAFETY: this is actually a safe intrinsic |
1361 | unsafe { intrinsics::copysignf128(self, sign) } |
1362 | } |
1363 | |
1364 | /// Float addition that allows optimizations based on algebraic rules. |
1365 | /// |
1366 | /// See [algebraic operators](primitive@f32#algebraic-operators) for more info. |
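///
/// # Examples
///
/// An illustrative doctest sketch; it assumes the `float_algebraic` feature gate
/// (taken from the attribute below) and mirrors the platform gating used by the
/// other examples in this module.
///
/// ```
/// #![feature(f128)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 4.0_f128;
///
/// // The operation may be rearranged, so compare with a tolerance rather than exactly.
/// assert!((x.algebraic_add(2.0) - 6.0).abs() <= f128::EPSILON);
/// # }
/// ```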
1367 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1368 | #[unstable (feature = "float_algebraic" , issue = "136469" )] |
1369 | #[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )] |
1370 | #[inline ] |
1371 | pub const fn algebraic_add(self, rhs: f128) -> f128 { |
1372 | intrinsics::fadd_algebraic(self, rhs) |
1373 | } |
1374 | |
1375 | /// Float subtraction that allows optimizations based on algebraic rules. |
1376 | /// |
1377 | /// See [algebraic operators](primitive@f32#algebraic-operators) for more info. |
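///
/// # Examples
///
/// An illustrative doctest sketch, gated like the `algebraic_add` example above.
///
/// ```
/// #![feature(f128)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 4.0_f128;
///
/// assert!((x.algebraic_sub(2.0) - 2.0).abs() <= f128::EPSILON);
/// # }
/// ```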
1378 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1379 | #[unstable (feature = "float_algebraic" , issue = "136469" )] |
1380 | #[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )] |
1381 | #[inline ] |
1382 | pub const fn algebraic_sub(self, rhs: f128) -> f128 { |
1383 | intrinsics::fsub_algebraic(self, rhs) |
1384 | } |
1385 | |
1386 | /// Float multiplication that allows optimizations based on algebraic rules. |
1387 | /// |
1388 | /// See [algebraic operators](primitive@f32#algebraic-operators) for more info. |
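///
/// # Examples
///
/// An illustrative doctest sketch, gated like the `algebraic_add` example above.
///
/// ```
/// #![feature(f128)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 4.0_f128;
///
/// assert!((x.algebraic_mul(2.0) - 8.0).abs() <= f128::EPSILON);
/// # }
/// ```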
1389 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1390 | #[unstable (feature = "float_algebraic" , issue = "136469" )] |
1391 | #[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )] |
1392 | #[inline ] |
1393 | pub const fn algebraic_mul(self, rhs: f128) -> f128 { |
1394 | intrinsics::fmul_algebraic(self, rhs) |
1395 | } |
1396 | |
1397 | /// Float division that allows optimizations based on algebraic rules. |
1398 | /// |
1399 | /// See [algebraic operators](primitive@f32#algebraic-operators) for more info. |
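///
/// # Examples
///
/// An illustrative doctest sketch, gated like the `algebraic_add` example above.
///
/// ```
/// #![feature(f128)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 4.0_f128;
///
/// assert!((x.algebraic_div(2.0) - 2.0).abs() <= f128::EPSILON);
/// # }
/// ```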
1400 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1401 | #[unstable (feature = "float_algebraic" , issue = "136469" )] |
1402 | #[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )] |
1403 | #[inline ] |
1404 | pub const fn algebraic_div(self, rhs: f128) -> f128 { |
1405 | intrinsics::fdiv_algebraic(self, rhs) |
1406 | } |
1407 | |
1408 | /// Float remainder that allows optimizations based on algebraic rules. |
1409 | /// |
1410 | /// See [algebraic operators](primitive@f32#algebraic-operators) for more info. |
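///
/// # Examples
///
/// An illustrative doctest sketch, gated like the `algebraic_add` example above.
///
/// ```
/// #![feature(f128)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// let x = 5.0_f128;
///
/// assert!((x.algebraic_rem(2.0) - 1.0).abs() <= f128::EPSILON);
/// # }
/// ```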
1411 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1412 | #[unstable (feature = "float_algebraic" , issue = "136469" )] |
1413 | #[rustc_const_unstable (feature = "float_algebraic" , issue = "136469" )] |
1414 | #[inline ] |
1415 | pub const fn algebraic_rem(self, rhs: f128) -> f128 { |
1416 | intrinsics::frem_algebraic(self, rhs) |
1417 | } |
1418 | } |
1419 | |
1420 | // Functions in this module fall into `core_float_math` |
1421 | // FIXME(f16_f128): all doctests must be gated to platforms that have `long double` === `_Float128` |
1422 | // due to https://github.com/llvm/llvm-project/issues/44744. aarch64 linux matches this. |
1423 | // #[unstable(feature = "core_float_math", issue = "137578")] |
1424 | #[cfg (not(test))] |
1425 | #[doc (test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))] |
1426 | impl f128 { |
1427 | /// Returns the largest integer less than or equal to `self`. |
1428 | /// |
1429 | /// This function always returns the precise result. |
1430 | /// |
1431 | /// # Examples |
1432 | /// |
1433 | /// ``` |
1434 | /// #![feature(f128)] |
1435 | /// # #[cfg (not(miri))] |
1436 | /// # #[cfg (target_has_reliable_f128_math)] { |
1437 | /// |
1438 | /// let f = 3.7_f128; |
1439 | /// let g = 3.0_f128; |
1440 | /// let h = -3.7_f128; |
1441 | /// |
1442 | /// assert_eq!(f.floor(), 3.0); |
1443 | /// assert_eq!(g.floor(), 3.0); |
1444 | /// assert_eq!(h.floor(), -4.0); |
1445 | /// # } |
1446 | /// ``` |
1447 | #[inline ] |
1448 | #[rustc_allow_incoherent_impl ] |
1449 | #[unstable (feature = "f128" , issue = "116909" )] |
1450 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1451 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1452 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1453 | pub const fn floor(self) -> f128 { |
1454 | // SAFETY: intrinsic with no preconditions |
1455 | unsafe { intrinsics::floorf128(self) } |
1456 | } |
1457 | |
1458 | /// Returns the smallest integer greater than or equal to `self`. |
1459 | /// |
1460 | /// This function always returns the precise result. |
1461 | /// |
1462 | /// # Examples |
1463 | /// |
1464 | /// ``` |
1465 | /// #![feature(f128)] |
1466 | /// # #[cfg (not(miri))] |
1467 | /// # #[cfg (target_has_reliable_f128_math)] { |
1468 | /// |
1469 | /// let f = 3.01_f128; |
1470 | /// let g = 4.0_f128; |
1471 | /// |
1472 | /// assert_eq!(f.ceil(), 4.0); |
1473 | /// assert_eq!(g.ceil(), 4.0); |
1474 | /// # } |
1475 | /// ``` |
1476 | #[inline ] |
1477 | #[doc (alias = "ceiling" )] |
1478 | #[rustc_allow_incoherent_impl ] |
1479 | #[unstable (feature = "f128" , issue = "116909" )] |
1480 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1481 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1482 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1483 | pub const fn ceil(self) -> f128 { |
1484 | // SAFETY: intrinsic with no preconditions |
1485 | unsafe { intrinsics::ceilf128(self) } |
1486 | } |
1487 | |
1488 | /// Returns the nearest integer to `self`. If a value is half-way between two |
1489 | /// integers, round away from `0.0`. |
1490 | /// |
1491 | /// This function always returns the precise result. |
1492 | /// |
1493 | /// # Examples |
1494 | /// |
1495 | /// ``` |
1496 | /// #![feature(f128)] |
1497 | /// # #[cfg (not(miri))] |
1498 | /// # #[cfg (target_has_reliable_f128_math)] { |
1499 | /// |
1500 | /// let f = 3.3_f128; |
1501 | /// let g = -3.3_f128; |
1502 | /// let h = -3.7_f128; |
1503 | /// let i = 3.5_f128; |
1504 | /// let j = 4.5_f128; |
1505 | /// |
1506 | /// assert_eq!(f.round(), 3.0); |
1507 | /// assert_eq!(g.round(), -3.0); |
1508 | /// assert_eq!(h.round(), -4.0); |
1509 | /// assert_eq!(i.round(), 4.0); |
1510 | /// assert_eq!(j.round(), 5.0); |
1511 | /// # } |
1512 | /// ``` |
1513 | #[inline ] |
1514 | #[rustc_allow_incoherent_impl ] |
1515 | #[unstable (feature = "f128" , issue = "116909" )] |
1516 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1517 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1518 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1519 | pub const fn round(self) -> f128 { |
1520 | // SAFETY: intrinsic with no preconditions |
1521 | unsafe { intrinsics::roundf128(self) } |
1522 | } |
1523 | |
1524 | /// Returns the nearest integer to a number. Rounds half-way cases to the number |
1525 | /// with an even least significant digit. |
1526 | /// |
1527 | /// This function always returns the precise result. |
1528 | /// |
1529 | /// # Examples |
1530 | /// |
1531 | /// ``` |
1532 | /// #![feature(f128)] |
1533 | /// # #[cfg (not(miri))] |
1534 | /// # #[cfg (target_has_reliable_f128_math)] { |
1535 | /// |
1536 | /// let f = 3.3_f128; |
1537 | /// let g = -3.3_f128; |
1538 | /// let h = 3.5_f128; |
1539 | /// let i = 4.5_f128; |
1540 | /// |
1541 | /// assert_eq!(f.round_ties_even(), 3.0); |
1542 | /// assert_eq!(g.round_ties_even(), -3.0); |
1543 | /// assert_eq!(h.round_ties_even(), 4.0); |
1544 | /// assert_eq!(i.round_ties_even(), 4.0); |
1545 | /// # } |
1546 | /// ``` |
1547 | #[inline ] |
1548 | #[rustc_allow_incoherent_impl ] |
1549 | #[unstable (feature = "f128" , issue = "116909" )] |
1550 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1551 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1552 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1553 | pub const fn round_ties_even(self) -> f128 { |
1554 | intrinsics::round_ties_even_f128(self) |
1555 | } |
1556 | |
1557 | /// Returns the integer part of `self`. |
1558 | /// This means that non-integer numbers are always truncated towards zero. |
1559 | /// |
1560 | /// This function always returns the precise result. |
1561 | /// |
1562 | /// # Examples |
1563 | /// |
1564 | /// ``` |
1565 | /// #![feature(f128)] |
1566 | /// # #[cfg (not(miri))] |
1567 | /// # #[cfg (target_has_reliable_f128_math)] { |
1568 | /// |
1569 | /// let f = 3.7_f128; |
1570 | /// let g = 3.0_f128; |
1571 | /// let h = -3.7_f128; |
1572 | /// |
1573 | /// assert_eq!(f.trunc(), 3.0); |
1574 | /// assert_eq!(g.trunc(), 3.0); |
1575 | /// assert_eq!(h.trunc(), -3.0); |
1576 | /// # } |
1577 | /// ``` |
1578 | #[inline ] |
1579 | #[doc (alias = "truncate" )] |
1580 | #[rustc_allow_incoherent_impl ] |
1581 | #[unstable (feature = "f128" , issue = "116909" )] |
1582 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1583 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1584 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1585 | pub const fn trunc(self) -> f128 { |
1586 | // SAFETY: intrinsic with no preconditions |
1587 | unsafe { intrinsics::truncf128(self) } |
1588 | } |
1589 | |
1590 | /// Returns the fractional part of `self`. |
1591 | /// |
1592 | /// This function always returns the precise result. |
1593 | /// |
1594 | /// # Examples |
1595 | /// |
1596 | /// ``` |
1597 | /// #![feature(f128)] |
1598 | /// # #[cfg (not(miri))] |
1599 | /// # #[cfg (target_has_reliable_f128_math)] { |
1600 | /// |
1601 | /// let x = 3.6_f128; |
1602 | /// let y = -3.6_f128; |
1603 | /// let abs_difference_x = (x.fract() - 0.6).abs(); |
1604 | /// let abs_difference_y = (y.fract() - (-0.6)).abs(); |
1605 | /// |
1606 | /// assert!(abs_difference_x <= f128::EPSILON); |
1607 | /// assert!(abs_difference_y <= f128::EPSILON); |
1608 | /// # } |
1609 | /// ``` |
1610 | #[inline ] |
1611 | #[rustc_allow_incoherent_impl ] |
1612 | #[unstable (feature = "f128" , issue = "116909" )] |
1613 | #[rustc_const_unstable (feature = "f128" , issue = "116909" )] |
1614 | // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] |
1615 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1616 | pub const fn fract(self) -> f128 { |
1617 | self - self.trunc() |
1618 | } |
1619 | |
1620 | /// Fused multiply-add. Computes `(self * a) + b` with only one rounding |
1621 | /// error, yielding a more accurate result than an unfused multiply-add. |
1622 | /// |
1623 | /// Using `mul_add` *may* be more performant than an unfused multiply-add if |
1624 | /// the target architecture has a dedicated `fma` CPU instruction. However, |
/// this is not always true, and will depend heavily on designing
1626 | /// algorithms with specific target hardware in mind. |
1627 | /// |
1628 | /// # Precision |
1629 | /// |
1630 | /// The result of this operation is guaranteed to be the rounded |
1631 | /// infinite-precision result. It is specified by IEEE 754 as |
1632 | /// `fusedMultiplyAdd` and guaranteed not to change. |
1633 | /// |
1634 | /// # Examples |
1635 | /// |
1636 | /// ``` |
1637 | /// #![feature(f128)] |
1638 | /// # #[cfg (not(miri))] |
1639 | /// # #[cfg (target_has_reliable_f128_math)] { |
1640 | /// |
1641 | /// let m = 10.0_f128; |
1642 | /// let x = 4.0_f128; |
1643 | /// let b = 60.0_f128; |
1644 | /// |
1645 | /// assert_eq!(m.mul_add(x, b), 100.0); |
1646 | /// assert_eq!(m * x + b, 100.0); |
1647 | /// |
1648 | /// let one_plus_eps = 1.0_f128 + f128::EPSILON; |
1649 | /// let one_minus_eps = 1.0_f128 - f128::EPSILON; |
1650 | /// let minus_one = -1.0_f128; |
1651 | /// |
1652 | /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps. |
1653 | /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f128::EPSILON * f128::EPSILON); |
1654 | /// // Different rounding with the non-fused multiply and add. |
1655 | /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0); |
1656 | /// # } |
1657 | /// ``` |
1658 | #[inline ] |
1659 | #[rustc_allow_incoherent_impl ] |
1660 | #[doc (alias = "fmaf128" , alias = "fusedMultiplyAdd" )] |
1661 | #[unstable (feature = "f128" , issue = "116909" )] |
1662 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1663 | pub fn mul_add(self, a: f128, b: f128) -> f128 { |
1664 | // SAFETY: intrinsic with no preconditions |
1665 | unsafe { intrinsics::fmaf128(self, a, b) } |
1666 | } |
1667 | |
1668 | /// Calculates Euclidean division, the matching method for `rem_euclid`. |
1669 | /// |
1670 | /// This computes the integer `n` such that |
1671 | /// `self = n * rhs + self.rem_euclid(rhs)`. |
1672 | /// In other words, the result is `self / rhs` rounded to the integer `n` |
1673 | /// such that `self >= n * rhs`. |
1674 | /// |
1675 | /// # Precision |
1676 | /// |
1677 | /// The result of this operation is guaranteed to be the rounded |
1678 | /// infinite-precision result. |
1679 | /// |
1680 | /// # Examples |
1681 | /// |
1682 | /// ``` |
1683 | /// #![feature(f128)] |
1684 | /// # #[cfg (not(miri))] |
1685 | /// # #[cfg (target_has_reliable_f128_math)] { |
1686 | /// |
1687 | /// let a: f128 = 7.0; |
1688 | /// let b = 4.0; |
1689 | /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0 |
1690 | /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0 |
1691 | /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0 |
1692 | /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0 |
1693 | /// # } |
1694 | /// ``` |
1695 | #[inline ] |
1696 | #[rustc_allow_incoherent_impl ] |
1697 | #[unstable (feature = "f128" , issue = "116909" )] |
1698 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1699 | pub fn div_euclid(self, rhs: f128) -> f128 { |
1700 | let q = (self / rhs).trunc(); |
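// If truncation left a negative remainder, step the quotient by one in the
// direction that makes the Euclidean remainder nonnegative.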
1701 | if self % rhs < 0.0 { |
1702 | return if rhs > 0.0 { q - 1.0 } else { q + 1.0 }; |
1703 | } |
1704 | q |
1705 | } |
1706 | |
1707 | /// Calculates the least nonnegative remainder of `self (mod rhs)`. |
1708 | /// |
1709 | /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in |
1710 | /// most cases. However, due to a floating point round-off error it can |
1711 | /// result in `r == rhs.abs()`, violating the mathematical definition, if |
1712 | /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`. |
1713 | /// This result is not an element of the function's codomain, but it is the |
1714 | /// closest floating point number in the real numbers and thus fulfills the |
1715 | /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)` |
1716 | /// approximately. |
1717 | /// |
1718 | /// # Precision |
1719 | /// |
1720 | /// The result of this operation is guaranteed to be the rounded |
1721 | /// infinite-precision result. |
1722 | /// |
1723 | /// # Examples |
1724 | /// |
1725 | /// ``` |
1726 | /// #![feature(f128)] |
1727 | /// # #[cfg (not(miri))] |
1728 | /// # #[cfg (target_has_reliable_f128_math)] { |
1729 | /// |
1730 | /// let a: f128 = 7.0; |
1731 | /// let b = 4.0; |
1732 | /// assert_eq!(a.rem_euclid(b), 3.0); |
1733 | /// assert_eq!((-a).rem_euclid(b), 1.0); |
1734 | /// assert_eq!(a.rem_euclid(-b), 3.0); |
1735 | /// assert_eq!((-a).rem_euclid(-b), 1.0); |
1736 | /// // limitation due to round-off error |
1737 | /// assert!((-f128::EPSILON).rem_euclid(3.0) != 0.0); |
1738 | /// # } |
1739 | /// ``` |
1740 | #[inline ] |
1741 | #[rustc_allow_incoherent_impl ] |
1742 | #[doc (alias = "modulo" , alias = "mod" )] |
1743 | #[unstable (feature = "f128" , issue = "116909" )] |
1744 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1745 | pub fn rem_euclid(self, rhs: f128) -> f128 { |
1746 | let r = self % rhs; |
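// `%` returns a remainder with the sign of `self`; fold a negative remainder
// back into `[0, rhs.abs())`.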
1747 | if r < 0.0 { r + rhs.abs() } else { r } |
1748 | } |
1749 | |
1750 | /// Raises a number to an integer power. |
1751 | /// |
1752 | /// Using this function is generally faster than using `powf`. |
1753 | /// It might have a different sequence of rounding operations than `powf`, |
1754 | /// so the results are not guaranteed to agree. |
1755 | /// |
1756 | /// # Unspecified precision |
1757 | /// |
1758 | /// The precision of this function is non-deterministic. This means it varies by platform, |
1759 | /// Rust version, and can even differ within the same execution from one invocation to the next. |
1760 | /// |
1761 | /// # Examples |
1762 | /// |
1763 | /// ``` |
1764 | /// #![feature(f128)] |
1765 | /// # #[cfg (not(miri))] |
1766 | /// # #[cfg (target_has_reliable_f128_math)] { |
1767 | /// |
1768 | /// let x = 2.0_f128; |
1769 | /// let abs_difference = (x.powi(2) - (x * x)).abs(); |
1770 | /// assert!(abs_difference <= f128::EPSILON); |
1771 | /// |
1772 | /// assert_eq!(f128::powi(f128::NAN, 0), 1.0); |
1773 | /// # } |
1774 | /// ``` |
1775 | #[inline ] |
1776 | #[rustc_allow_incoherent_impl ] |
1777 | #[unstable (feature = "f128" , issue = "116909" )] |
1778 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1779 | pub fn powi(self, n: i32) -> f128 { |
1780 | // SAFETY: intrinsic with no preconditions |
1781 | unsafe { intrinsics::powif128(self, n) } |
1782 | } |
1783 | |
1784 | /// Returns the square root of a number. |
1785 | /// |
1786 | /// Returns NaN if `self` is a negative number other than `-0.0`. |
1787 | /// |
1788 | /// # Precision |
1789 | /// |
1790 | /// The result of this operation is guaranteed to be the rounded |
1791 | /// infinite-precision result. It is specified by IEEE 754 as `squareRoot` |
1792 | /// and guaranteed not to change. |
1793 | /// |
1794 | /// # Examples |
1795 | /// |
1796 | /// ``` |
1797 | /// #![feature(f128)] |
1798 | /// # #[cfg (not(miri))] |
1799 | /// # #[cfg (target_has_reliable_f128_math)] { |
1800 | /// |
1801 | /// let positive = 4.0_f128; |
1802 | /// let negative = -4.0_f128; |
1803 | /// let negative_zero = -0.0_f128; |
1804 | /// |
1805 | /// assert_eq!(positive.sqrt(), 2.0); |
1806 | /// assert!(negative.sqrt().is_nan()); |
1807 | /// assert!(negative_zero.sqrt() == negative_zero); |
1808 | /// # } |
1809 | /// ``` |
1810 | #[inline ] |
1811 | #[doc (alias = "squareRoot" )] |
1812 | #[rustc_allow_incoherent_impl ] |
1813 | #[unstable (feature = "f128" , issue = "116909" )] |
1814 | #[must_use = "method returns a new number and does not mutate the original value" ] |
1815 | pub fn sqrt(self) -> f128 { |
1816 | // SAFETY: intrinsic with no preconditions |
1817 | unsafe { intrinsics::sqrtf128(self) } |
1818 | } |
1819 | } |
1820 | |