// The functions are complex with many branches, and explicit
// `return`s make it clear where the function exit points are.
#![allow(clippy::needless_return)]
4 | |
5 | use crate::float::Float; |
6 | use crate::int::{CastInto, DInt, HInt, Int}; |
7 | |
8 | fn div32<F: Float>(a: F, b: F) -> F |
9 | where |
10 | u32: CastInto<F::Int>, |
11 | F::Int: CastInto<u32>, |
12 | i32: CastInto<F::Int>, |
13 | F::Int: CastInto<i32>, |
14 | F::Int: HInt, |
15 | <F as Float>::Int: core::ops::Mul, |
16 | { |
17 | const NUMBER_OF_HALF_ITERATIONS: usize = 0; |
18 | const NUMBER_OF_FULL_ITERATIONS: usize = 3; |
19 | const USE_NATIVE_FULL_ITERATIONS: bool = true; |
20 | |
21 | let one = F::Int::ONE; |
22 | let zero = F::Int::ZERO; |
23 | let hw = F::BITS / 2; |
24 | let lo_mask = u32::MAX >> hw; |
25 | |
26 | let significand_bits = F::SIGNIFICAND_BITS; |
27 | let max_exponent = F::EXPONENT_MAX; |
28 | |
29 | let exponent_bias = F::EXPONENT_BIAS; |
30 | |
31 | let implicit_bit = F::IMPLICIT_BIT; |
32 | let significand_mask = F::SIGNIFICAND_MASK; |
33 | let sign_bit = F::SIGN_MASK as F::Int; |
34 | let abs_mask = sign_bit - one; |
35 | let exponent_mask = F::EXPONENT_MASK; |
36 | let inf_rep = exponent_mask; |
37 | let quiet_bit = implicit_bit >> 1; |
38 | let qnan_rep = exponent_mask | quiet_bit; |
39 | |
#[inline(always)]
41 | fn negate_u32(a: u32) -> u32 { |
42 | (<i32>::wrapping_neg(a as i32)) as u32 |
43 | } |
44 | |
45 | let a_rep = a.repr(); |
46 | let b_rep = b.repr(); |
47 | |
48 | let a_exponent = (a_rep >> significand_bits) & max_exponent.cast(); |
49 | let b_exponent = (b_rep >> significand_bits) & max_exponent.cast(); |
50 | let quotient_sign = (a_rep ^ b_rep) & sign_bit; |
51 | |
52 | let mut a_significand = a_rep & significand_mask; |
53 | let mut b_significand = b_rep & significand_mask; |
54 | let mut scale = 0; |
55 | |
56 | // Detect if a or b is zero, denormal, infinity, or NaN. |
57 | if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() |
58 | || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() |
59 | { |
60 | let a_abs = a_rep & abs_mask; |
61 | let b_abs = b_rep & abs_mask; |
62 | |
63 | // NaN / anything = qNaN |
64 | if a_abs > inf_rep { |
65 | return F::from_repr(a_rep | quiet_bit); |
66 | } |
67 | // anything / NaN = qNaN |
68 | if b_abs > inf_rep { |
69 | return F::from_repr(b_rep | quiet_bit); |
70 | } |
71 | |
72 | if a_abs == inf_rep { |
73 | if b_abs == inf_rep { |
74 | // infinity / infinity = NaN |
75 | return F::from_repr(qnan_rep); |
76 | } else { |
77 | // infinity / anything else = +/- infinity |
78 | return F::from_repr(a_abs | quotient_sign); |
79 | } |
80 | } |
81 | |
82 | // anything else / infinity = +/- 0 |
83 | if b_abs == inf_rep { |
84 | return F::from_repr(quotient_sign); |
85 | } |
86 | |
87 | if a_abs == zero { |
88 | if b_abs == zero { |
89 | // zero / zero = NaN |
90 | return F::from_repr(qnan_rep); |
91 | } else { |
92 | // zero / anything else = +/- zero |
93 | return F::from_repr(quotient_sign); |
94 | } |
95 | } |
96 | |
97 | // anything else / zero = +/- infinity |
98 | if b_abs == zero { |
99 | return F::from_repr(inf_rep | quotient_sign); |
100 | } |
101 | |
// One or both of a and b is denormal; the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
105 | if a_abs < implicit_bit { |
106 | let (exponent, significand) = F::normalize(a_significand); |
107 | scale += exponent; |
108 | a_significand = significand; |
109 | } |
110 | |
111 | if b_abs < implicit_bit { |
112 | let (exponent, significand) = F::normalize(b_significand); |
113 | scale -= exponent; |
114 | b_significand = significand; |
115 | } |
116 | } |
117 | |
118 | // Set the implicit significand bit. If we fell through from the |
// denormal path it was already set by normalize(), but setting it twice
120 | // won't hurt anything. |
121 | a_significand |= implicit_bit; |
122 | b_significand |= implicit_bit; |
123 | |
124 | let written_exponent: i32 = CastInto::<u32>::cast( |
125 | a_exponent |
126 | .wrapping_sub(b_exponent) |
127 | .wrapping_add(scale.cast()), |
128 | ) |
129 | .wrapping_add(exponent_bias) as i32; |
130 | let b_uq1 = b_significand << (F::BITS - significand_bits - 1); |
131 | |
132 | // Align the significand of b as a UQ1.(n-1) fixed-point number in the range |
133 | // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax |
134 | // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2. |
135 | // The max error for this approximation is achieved at endpoints, so |
136 | // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., |
137 | // which is about 4.5 bits. |
138 | // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571... |
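// Illustrative check (not part of the algorithm): for b = 1.5 the estimate is
// x0 = 3/4 + 1/sqrt(2) - 0.75 ~= 0.7071, while 1/b ~= 0.6667, an error of
// about 0.0404, within the 0.0429 bound stated above.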
139 | |
140 | // Then, refine the reciprocal estimate using a quadratically converging |
141 | // Newton-Raphson iteration: |
142 | // x_{n+1} = x_n * (2 - x_n * b) |
143 | // |
144 | // Let b be the original divisor considered "in infinite precision" and |
145 | // obtained from IEEE754 representation of function argument (with the |
146 | // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in |
147 | // UQ1.(W-1). |
148 | // |
149 | // Let b_hw be an infinitely precise number obtained from the highest (HW-1) |
150 | // bits of divisor significand (with the implicit bit set). Corresponds to |
151 | // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated** |
152 | // version of b_UQ1. |
153 | // |
154 | // Let e_n := x_n - 1/b_hw |
155 | // E_n := x_n - 1/b |
156 | // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) |
157 | // = abs(e_n) + (b - b_hw) / (b*b_hw) |
158 | // <= abs(e_n) + 2 * 2^-HW |
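// Worked example of the iteration above (illustrative only): with b = 1.5 and
// x_0 ~= 0.7071 (error ~= 4.0e-2), x_1 = x_0 * (2 - 1.5 * x_0) ~= 0.6642
// (error ~= 2.5e-3) and x_2 ~= 0.666658 (error ~= 9.0e-6); each error is
// roughly b times the square of the previous one, i.e. quadratic convergence.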
159 | |
160 | // rep_t-sized iterations may be slower than the corresponding half-width |
// variant depending on the hardware and whether single/double/quad precision
162 | // is selected. |
163 | // NB: Using half-width iterations increases computation errors due to |
164 | // rounding, so error estimations have to be computed taking the selected |
165 | // mode into account! |
166 | |
#[allow(clippy::absurd_extreme_comparisons)]
168 | let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { |
169 | // Starting with (n-1) half-width iterations |
170 | let b_uq1_hw: u16 = |
171 | (CastInto::<u32>::cast(b_significand) >> (significand_bits + 1 - hw)) as u16; |
172 | |
173 | // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW |
174 | // with W0 being either 16 or 32 and W0 <= HW. |
175 | // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which |
176 | // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. |
177 | |
// This assumes HW is at least 32 (shift C into the highest bits if needed);
// for f32 this branch is never taken since NUMBER_OF_HALF_ITERATIONS is 0.
179 | let c_hw = (0x7504_u32 as u16).wrapping_shl(hw.wrapping_sub(32)); |
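// (0x7504 is (3/4 + 1/sqrt(2) - 1) truncated to 16 fractional bits,
// i.e. floor(0.45710678... * 2^16) = 29956.)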
180 | |
181 | // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, |
182 | // so x0 fits to UQ0.HW without wrapping. |
183 | let x_uq0_hw: u16 = { |
184 | let mut x_uq0_hw: u16 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); |
185 | // An e_0 error is comprised of errors due to |
186 | // * x0 being an inherently imprecise first approximation of 1/b_hw |
187 | // * C_hw being some (irrational) number **truncated** to W0 bits |
188 | // Please note that e_0 is calculated against the infinitely precise |
189 | // reciprocal of b_hw (that is, **truncated** version of b). |
190 | // |
191 | // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 |
192 | |
193 | // By construction, 1 <= b < 2 |
194 | // f(x) = x * (2 - b*x) = 2*x - b*x^2 |
195 | // f'(x) = 2 * (1 - b*x) |
196 | // |
197 | // On the [0, 1] interval, f(0) = 0, |
// then it increases until f(1/b) = 1/b, its maximum on (0, 1),
// then it decreases to f(1) = 2 - b
200 | // |
201 | // Let g(x) = x - f(x) = b*x^2 - x. |
202 | // On (0, 1/b), g(x) < 0 <=> f(x) > x |
203 | // On (1/b, 1], g(x) > 0 <=> f(x) < x |
204 | // |
205 | // For half-width iterations, b_hw is used instead of b. |
#[allow(clippy::reversed_empty_ranges)]
207 | for _ in 0..NUMBER_OF_HALF_ITERATIONS { |
208 | // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp |
209 | // of corr_UQ1_hw. |
210 | // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). |
211 | // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided |
212 | // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is |
213 | // expected to be strictly positive because b_UQ1_hw has its highest bit set |
// and x_UQ0_hw should be rather large (it converges to 1/b_hw, and 1/2 < 1/b_hw <= 1).
215 | let corr_uq1_hw: u16 = |
216 | 0.wrapping_sub((x_uq0_hw as u32).wrapping_mul(b_uq1_hw.cast()) >> hw) as u16; |
217 | |
// Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
// obtaining a UQ1.(HW-1) number, and show that its highest bit can be
// considered to be 0, so that it can be represented in UQ0.HW.
// From the above analysis of f(x), if corr_UQ1_hw were represented
// without any intermediate loss of precision (that is, in twice_rep_t),
// x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
// less otherwise. On the other hand, to obtain [1.]000..., one has to pass
// 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
// to 1.0 not being representable as UQ0.HW).
// The fact that corr_UQ1_hw was virtually rounded up (the result of the
// multiplication is **first** truncated, then negated - to improve
// error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
230 | x_uq0_hw = ((x_uq0_hw as u32).wrapping_mul(corr_uq1_hw as u32) >> (hw - 1)) as u16; |
231 | // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t |
232 | // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after |
233 | // any number of iterations, so just subtract 2 from the reciprocal |
234 | // approximation after last iteration. |
235 | |
// In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
// corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
//             = 1 - e_n * b_hw + 2*eps1
// x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
//          = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
//          = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
// e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
//         = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
//                        \------ >0 -------/   \-- >0 ---/
// abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
246 | } |
247 | // For initial half-width iterations, U = 2^-HW |
248 | // Let abs(e_n) <= u_n * U, |
249 | // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) |
250 | // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) |
251 | |
252 | // Account for possible overflow (see above). For an overflow to occur for the |
253 | // first time, for "ideal" corr_UQ1_hw (that is, without intermediate |
254 | // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum |
// value representable in UQ0.HW or less by 1. This means that 1/b_hw has to
256 | // be not below that value (see g(x) above), so it is safe to decrement just |
257 | // once after the final iteration. On the other hand, an effective value of |
258 | // divisor changes after this point (from b_hw to b), so adjust here. |
259 | x_uq0_hw.wrapping_sub(1_u16) |
260 | }; |
261 | |
262 | // Error estimations for full-precision iterations are calculated just |
263 | // as above, but with U := 2^-W and taking extra decrementing into account. |
264 | // We need at least one such iteration. |
265 | |
266 | // Simulating operations on a twice_rep_t to perform a single final full-width |
267 | // iteration. Using ad-hoc multiplication implementations to take advantage |
268 | // of particular structure of operands. |
269 | |
270 | let blo: u32 = (CastInto::<u32>::cast(b_uq1)) & lo_mask; |
271 | // x_UQ0 = x_UQ0_hw * 2^HW - 1 |
272 | // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 |
273 | // |
//   <--- higher half ---><--- lower half --->
//   [x_UQ0_hw * b_UQ1_hw]
// +           [  x_UQ0_hw * blo   ]
// -                      [      b_UQ1       ]
// = [       result      ][... discarded ....]
279 | let corr_uq1 = negate_u32( |
280 | (x_uq0_hw as u32) * (b_uq1_hw as u32) + (((x_uq0_hw as u32) * (blo)) >> hw) - 1, |
281 | ); // account for *possible* carry |
282 | let lo_corr = corr_uq1 & lo_mask; |
283 | let hi_corr = corr_uq1 >> hw; |
284 | // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 |
285 | let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u32) * hi_corr) << 1) |
286 | .wrapping_add(((x_uq0_hw as u32) * lo_corr) >> (hw - 1)) |
287 | .wrapping_sub(2)) |
.cast(); // 1 to account for the fact that the highest bit of corr_UQ1 can be 1
// and 1 to account for a possible carry
290 | // Just like the case of half-width iterations but with possibility |
291 | // of overflowing by one extra Ulp of x_UQ0. |
292 | x_uq0 -= one; |
293 | // ... and then traditional fixup by 2 should work |
294 | |
295 | // On error estimation: |
// abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
//                 + (2^-HW + 2^-W)
298 | // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW |
299 | |
300 | // Then like for the half-width iterations: |
301 | // With 0 <= eps1, eps2 < 2^-W |
302 | // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b |
// abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8) ]
304 | // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ] |
305 | x_uq0 |
306 | } else { |
307 | // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n |
308 | let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast(); |
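// (0x7504F333 is (3/4 + 1/sqrt(2) - 1) truncated to 32 fractional bits,
// i.e. floor(0.45710678... * 2^32) = 1963258675.)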
309 | let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1); |
310 | // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32 |
311 | x_uq0 |
312 | }; |
313 | |
314 | let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { |
315 | for _ in 0..NUMBER_OF_FULL_ITERATIONS { |
316 | let corr_uq1: u32 = 0.wrapping_sub( |
317 | ((CastInto::<u32>::cast(x_uq0) as u64) * (CastInto::<u32>::cast(b_uq1) as u64)) |
318 | >> F::BITS, |
319 | ) as u32; |
320 | x_uq0 = ((((CastInto::<u32>::cast(x_uq0) as u64) * (corr_uq1 as u64)) >> (F::BITS - 1)) |
321 | as u32) |
322 | .cast(); |
323 | } |
324 | x_uq0 |
325 | } else { |
326 | // not using native full iterations |
327 | x_uq0 |
328 | }; |
329 | |
330 | // Finally, account for possible overflow, as explained above. |
331 | x_uq0 = x_uq0.wrapping_sub(2.cast()); |
332 | |
333 | // u_n for different precisions (with N-1 half-width iterations): |
334 | // W0 is the precision of C |
335 | // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW |
336 | |
337 | // Estimated with bc: |
338 | // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } |
339 | // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } |
340 | // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } |
341 | // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } |
342 | |
//             | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
// u_0         | < 184224974 | < 2812.1    | < 184224974 | < 791240234244348797
// u_1         | < 15804007  | < 242.7     | < 15804007  | < 67877681371350440
// u_2         | < 116308    | < 2.81      | < 116308    | < 499533100252317
// u_3         | < 7.31      |             | < 7.31      | < 27054456580
// u_4         |             |             |             | < 80.4
// Final (U_N) | same as u_3 | < 72        | < 218       | < 13920
350 | |
351 | // Add 2 to U_N due to final decrement. |
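// For f32 with 0 half-width + 3 full-width iterations, U_N is bounded by u_3
// above: 7.31 + 2 = 9.31, which the constant 10 below safely covers.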
352 | |
353 | let reciprocal_precision: <F as Float>::Int = 10.cast(); |
354 | |
355 | // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W |
356 | let x_uq0 = x_uq0 - reciprocal_precision; |
357 | // Now 1/b - (2*P) * 2^-W < x < 1/b |
358 | // FIXME Is x_UQ0 still >= 0.5? |
359 | |
360 | let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi(); |
361 | // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W). |
362 | |
363 | // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), |
364 | // adjust it to be in [1.0, 2.0) as UQ1.SB. |
365 | let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { |
366 | // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, |
367 | // effectively doubling its value as well as its error estimation. |
368 | let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( |
369 | (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand))) |
370 | .cast(), |
371 | ); |
372 | a_significand <<= 1; |
373 | (residual_lo, written_exponent.wrapping_sub(1)) |
374 | } else { |
375 | // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it |
376 | // to UQ1.SB by right shifting by 1. Least significant bit is omitted. |
377 | quotient >>= 1; |
378 | let residual_lo = (a_significand << significand_bits).wrapping_sub( |
379 | (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand))) |
380 | .cast(), |
381 | ); |
382 | (residual_lo, written_exponent) |
383 | }; |
384 | |
// drop mutability
386 | let quotient = quotient; |
387 | |
// NB: residual_lo is calculated above for the normal result case.
// It is re-computed on the denormal path, which is not expected to be
// performance-sensitive.
391 | |
392 | // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB |
393 | // Each NextAfter() increments the floating point value by at least 2^-SB |
394 | // (more, if exponent was incremented). |
395 | // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint): |
396 | // q |
397 | // | | * | | | | | |
398 | // <---> 2^t |
399 | // | | | | | * | | |
400 | // q |
401 | // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB. |
402 | // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB |
403 | // (8*P) * 2^-W < 0.5 * 2^-SB |
404 | // P < 2^(W-4-SB) |
405 | // Generally, for at most R NextAfter() to be enough, |
406 | // P < (2*R - 1) * 2^(W-4-SB) |
407 | // For f32 (0+3): 10 < 32 (OK) |
408 | // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required |
409 | // For f64: 220 < 256 (OK) |
410 | // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) |
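// (Here 2^(W-4-SB) is 2^(32-4-23) = 32 for f32, 2^(64-4-52) = 256 for f64 and
// 2^(128-4-112) = 4096 for f128, which is where the bounds above come from.)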
411 | |
412 | // If we have overflowed the exponent, return infinity |
413 | if written_exponent >= max_exponent as i32 { |
414 | return F::from_repr(inf_rep | quotient_sign); |
415 | } |
416 | |
417 | // Now, quotient <= the correctly-rounded result |
418 | // and may need taking NextAfter() up to 3 times (see error estimates above) |
419 | // r = a - b * q |
420 | let abs_result = if written_exponent > 0 { |
421 | let mut ret = quotient & significand_mask; |
422 | ret |= ((written_exponent as u32) << significand_bits).cast(); |
423 | residual <<= 1; |
424 | ret |
425 | } else { |
426 | if (significand_bits as i32 + written_exponent) < 0 { |
427 | return F::from_repr(quotient_sign); |
428 | } |
429 | let ret = quotient.wrapping_shr(negate_u32(CastInto::<u32>::cast(written_exponent)) + 1); |
430 | residual = (CastInto::<u32>::cast( |
431 | a_significand.wrapping_shl( |
432 | significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)), |
433 | ), |
434 | ) |
435 | .wrapping_sub( |
436 | (CastInto::<u32>::cast(ret).wrapping_mul(CastInto::<u32>::cast(b_significand))) << 1, |
437 | )) |
438 | .cast(); |
439 | ret |
440 | }; |
441 | // Round |
442 | let abs_result = { |
443 | residual += abs_result & one; // tie to even |
444 | // The above line conditionally turns the below LT comparison into LTE |
445 | |
446 | if residual > b_significand { |
447 | abs_result + one |
448 | } else { |
449 | abs_result |
450 | } |
451 | }; |
452 | F::from_repr(abs_result | quotient_sign) |
453 | } |
454 | |
455 | fn div64<F: Float>(a: F, b: F) -> F |
456 | where |
457 | u32: CastInto<F::Int>, |
458 | F::Int: CastInto<u32>, |
459 | i32: CastInto<F::Int>, |
460 | F::Int: CastInto<i32>, |
461 | u64: CastInto<F::Int>, |
462 | F::Int: CastInto<u64>, |
463 | i64: CastInto<F::Int>, |
464 | F::Int: CastInto<i64>, |
465 | F::Int: HInt, |
466 | { |
467 | const NUMBER_OF_HALF_ITERATIONS: usize = 3; |
468 | const NUMBER_OF_FULL_ITERATIONS: usize = 1; |
469 | const USE_NATIVE_FULL_ITERATIONS: bool = false; |
470 | |
471 | let one = F::Int::ONE; |
472 | let zero = F::Int::ZERO; |
473 | let hw = F::BITS / 2; |
474 | let lo_mask = u64::MAX >> hw; |
475 | |
476 | let significand_bits = F::SIGNIFICAND_BITS; |
477 | let max_exponent = F::EXPONENT_MAX; |
478 | |
479 | let exponent_bias = F::EXPONENT_BIAS; |
480 | |
481 | let implicit_bit = F::IMPLICIT_BIT; |
482 | let significand_mask = F::SIGNIFICAND_MASK; |
483 | let sign_bit = F::SIGN_MASK as F::Int; |
484 | let abs_mask = sign_bit - one; |
485 | let exponent_mask = F::EXPONENT_MASK; |
486 | let inf_rep = exponent_mask; |
487 | let quiet_bit = implicit_bit >> 1; |
488 | let qnan_rep = exponent_mask | quiet_bit; |
489 | |
#[inline(always)]
491 | fn negate_u64(a: u64) -> u64 { |
492 | (<i64>::wrapping_neg(a as i64)) as u64 |
493 | } |
494 | |
495 | let a_rep = a.repr(); |
496 | let b_rep = b.repr(); |
497 | |
498 | let a_exponent = (a_rep >> significand_bits) & max_exponent.cast(); |
499 | let b_exponent = (b_rep >> significand_bits) & max_exponent.cast(); |
500 | let quotient_sign = (a_rep ^ b_rep) & sign_bit; |
501 | |
502 | let mut a_significand = a_rep & significand_mask; |
503 | let mut b_significand = b_rep & significand_mask; |
504 | let mut scale = 0; |
505 | |
506 | // Detect if a or b is zero, denormal, infinity, or NaN. |
507 | if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() |
508 | || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() |
509 | { |
510 | let a_abs = a_rep & abs_mask; |
511 | let b_abs = b_rep & abs_mask; |
512 | |
513 | // NaN / anything = qNaN |
514 | if a_abs > inf_rep { |
515 | return F::from_repr(a_rep | quiet_bit); |
516 | } |
517 | // anything / NaN = qNaN |
518 | if b_abs > inf_rep { |
519 | return F::from_repr(b_rep | quiet_bit); |
520 | } |
521 | |
522 | if a_abs == inf_rep { |
523 | if b_abs == inf_rep { |
524 | // infinity / infinity = NaN |
525 | return F::from_repr(qnan_rep); |
526 | } else { |
527 | // infinity / anything else = +/- infinity |
528 | return F::from_repr(a_abs | quotient_sign); |
529 | } |
530 | } |
531 | |
532 | // anything else / infinity = +/- 0 |
533 | if b_abs == inf_rep { |
534 | return F::from_repr(quotient_sign); |
535 | } |
536 | |
537 | if a_abs == zero { |
538 | if b_abs == zero { |
539 | // zero / zero = NaN |
540 | return F::from_repr(qnan_rep); |
541 | } else { |
542 | // zero / anything else = +/- zero |
543 | return F::from_repr(quotient_sign); |
544 | } |
545 | } |
546 | |
547 | // anything else / zero = +/- infinity |
548 | if b_abs == zero { |
549 | return F::from_repr(inf_rep | quotient_sign); |
550 | } |
551 | |
// One or both of a and b is denormal; the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
555 | if a_abs < implicit_bit { |
556 | let (exponent, significand) = F::normalize(a_significand); |
557 | scale += exponent; |
558 | a_significand = significand; |
559 | } |
560 | |
561 | if b_abs < implicit_bit { |
562 | let (exponent, significand) = F::normalize(b_significand); |
563 | scale -= exponent; |
564 | b_significand = significand; |
565 | } |
566 | } |
567 | |
568 | // Set the implicit significand bit. If we fell through from the |
// denormal path it was already set by normalize(), but setting it twice
570 | // won't hurt anything. |
571 | a_significand |= implicit_bit; |
572 | b_significand |= implicit_bit; |
573 | |
574 | let written_exponent: i64 = CastInto::<u64>::cast( |
575 | a_exponent |
576 | .wrapping_sub(b_exponent) |
577 | .wrapping_add(scale.cast()), |
578 | ) |
579 | .wrapping_add(exponent_bias as u64) as i64; |
580 | let b_uq1 = b_significand << (F::BITS - significand_bits - 1); |
581 | |
582 | // Align the significand of b as a UQ1.(n-1) fixed-point number in the range |
583 | // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax |
584 | // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2. |
585 | // The max error for this approximation is achieved at endpoints, so |
586 | // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., |
587 | // which is about 4.5 bits. |
588 | // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571... |
589 | |
590 | // Then, refine the reciprocal estimate using a quadratically converging |
591 | // Newton-Raphson iteration: |
592 | // x_{n+1} = x_n * (2 - x_n * b) |
593 | // |
594 | // Let b be the original divisor considered "in infinite precision" and |
595 | // obtained from IEEE754 representation of function argument (with the |
596 | // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in |
597 | // UQ1.(W-1). |
598 | // |
599 | // Let b_hw be an infinitely precise number obtained from the highest (HW-1) |
600 | // bits of divisor significand (with the implicit bit set). Corresponds to |
601 | // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated** |
602 | // version of b_UQ1. |
603 | // |
604 | // Let e_n := x_n - 1/b_hw |
605 | // E_n := x_n - 1/b |
606 | // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) |
607 | // = abs(e_n) + (b - b_hw) / (b*b_hw) |
608 | // <= abs(e_n) + 2 * 2^-HW |
609 | |
610 | // rep_t-sized iterations may be slower than the corresponding half-width |
// variant depending on the hardware and whether single/double/quad precision
612 | // is selected. |
613 | // NB: Using half-width iterations increases computation errors due to |
614 | // rounding, so error estimations have to be computed taking the selected |
615 | // mode into account! |
616 | |
617 | let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { |
618 | // Starting with (n-1) half-width iterations |
619 | let b_uq1_hw: u32 = |
620 | (CastInto::<u64>::cast(b_significand) >> (significand_bits + 1 - hw)) as u32; |
621 | |
622 | // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW |
623 | // with W0 being either 16 or 32 and W0 <= HW. |
624 | // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which |
625 | // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. |
626 | |
627 | // HW is at least 32. Shifting into the highest bits if needed. |
628 | let c_hw = (0x7504F333_u64 as u32).wrapping_shl(hw.wrapping_sub(32)); |
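// (For f64, HW == 32, so no shift occurs and c_hw is exactly 0x7504F333, the
// UQ0.32 truncation of 3/4 + 1/sqrt(2) - 1.)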
629 | |
630 | // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, |
631 | // so x0 fits to UQ0.HW without wrapping. |
632 | let x_uq0_hw: u32 = { |
633 | let mut x_uq0_hw: u32 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); |
635 | // An e_0 error is comprised of errors due to |
636 | // * x0 being an inherently imprecise first approximation of 1/b_hw |
637 | // * C_hw being some (irrational) number **truncated** to W0 bits |
638 | // Please note that e_0 is calculated against the infinitely precise |
639 | // reciprocal of b_hw (that is, **truncated** version of b). |
640 | // |
641 | // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 |
642 | |
643 | // By construction, 1 <= b < 2 |
644 | // f(x) = x * (2 - b*x) = 2*x - b*x^2 |
645 | // f'(x) = 2 * (1 - b*x) |
646 | // |
647 | // On the [0, 1] interval, f(0) = 0, |
// then it increases until f(1/b) = 1/b, its maximum on (0, 1),
// then it decreases to f(1) = 2 - b
650 | // |
651 | // Let g(x) = x - f(x) = b*x^2 - x. |
652 | // On (0, 1/b), g(x) < 0 <=> f(x) > x |
653 | // On (1/b, 1], g(x) > 0 <=> f(x) < x |
654 | // |
655 | // For half-width iterations, b_hw is used instead of b. |
656 | for _ in 0..NUMBER_OF_HALF_ITERATIONS { |
657 | // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp |
658 | // of corr_UQ1_hw. |
659 | // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). |
660 | // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided |
661 | // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is |
662 | // expected to be strictly positive because b_UQ1_hw has its highest bit set |
// and x_UQ0_hw should be rather large (it converges to 1/b_hw, and 1/2 < 1/b_hw <= 1).
664 | let corr_uq1_hw: u32 = |
665 | 0.wrapping_sub(((x_uq0_hw as u64).wrapping_mul(b_uq1_hw as u64)) >> hw) as u32; |
667 | |
// Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
// obtaining a UQ1.(HW-1) number, and show that its highest bit can be
// considered to be 0, so that it can be represented in UQ0.HW.
// From the above analysis of f(x), if corr_UQ1_hw were represented
// without any intermediate loss of precision (that is, in twice_rep_t),
// x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
// less otherwise. On the other hand, to obtain [1.]000..., one has to pass
// 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
// to 1.0 not being representable as UQ0.HW).
// The fact that corr_UQ1_hw was virtually rounded up (the result of the
// multiplication is **first** truncated, then negated - to improve
// error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
680 | x_uq0_hw = ((x_uq0_hw as u64).wrapping_mul(corr_uq1_hw as u64) >> (hw - 1)) as u32; |
682 | // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t |
683 | // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after |
684 | // any number of iterations, so just subtract 2 from the reciprocal |
685 | // approximation after last iteration. |
686 | |
// In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
// corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
//             = 1 - e_n * b_hw + 2*eps1
// x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
//          = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
//          = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
// e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
//         = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
//                        \------ >0 -------/   \-- >0 ---/
// abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
697 | } |
698 | // For initial half-width iterations, U = 2^-HW |
699 | // Let abs(e_n) <= u_n * U, |
700 | // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) |
701 | // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) |
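// Illustrative check for f64 (HW = 32): starting from u_0 < 184224974, one
// application of the half1() estimate defined further below gives
// 2 * (u_0 + u_0^2) / 2^32 + 1 ~= 1.58e7, consistent with the u_1 bound in
// the table below.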
702 | |
703 | // Account for possible overflow (see above). For an overflow to occur for the |
704 | // first time, for "ideal" corr_UQ1_hw (that is, without intermediate |
705 | // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum |
// value representable in UQ0.HW or less by 1. This means that 1/b_hw has to
707 | // be not below that value (see g(x) above), so it is safe to decrement just |
708 | // once after the final iteration. On the other hand, an effective value of |
709 | // divisor changes after this point (from b_hw to b), so adjust here. |
710 | x_uq0_hw.wrapping_sub(1_u32) |
711 | }; |
712 | |
713 | // Error estimations for full-precision iterations are calculated just |
714 | // as above, but with U := 2^-W and taking extra decrementing into account. |
715 | // We need at least one such iteration. |
716 | |
717 | // Simulating operations on a twice_rep_t to perform a single final full-width |
718 | // iteration. Using ad-hoc multiplication implementations to take advantage |
719 | // of particular structure of operands. |
720 | let blo: u64 = (CastInto::<u64>::cast(b_uq1)) & lo_mask; |
721 | // x_UQ0 = x_UQ0_hw * 2^HW - 1 |
722 | // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 |
723 | // |
//   <--- higher half ---><--- lower half --->
//   [x_UQ0_hw * b_UQ1_hw]
// +           [  x_UQ0_hw * blo   ]
// -                      [      b_UQ1       ]
// = [       result      ][... discarded ....]
729 | let corr_uq1 = negate_u64( |
730 | (x_uq0_hw as u64) * (b_uq1_hw as u64) + (((x_uq0_hw as u64) * (blo)) >> hw) - 1, |
731 | ); // account for *possible* carry |
732 | let lo_corr = corr_uq1 & lo_mask; |
733 | let hi_corr = corr_uq1 >> hw; |
734 | // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 |
735 | let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u64) * hi_corr) << 1) |
736 | .wrapping_add(((x_uq0_hw as u64) * lo_corr) >> (hw - 1)) |
737 | .wrapping_sub(2)) |
.cast(); // 1 to account for the fact that the highest bit of corr_UQ1 can be 1
// and 1 to account for a possible carry
740 | // Just like the case of half-width iterations but with possibility |
741 | // of overflowing by one extra Ulp of x_UQ0. |
742 | x_uq0 -= one; |
743 | // ... and then traditional fixup by 2 should work |
744 | |
745 | // On error estimation: |
// abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
//                 + (2^-HW + 2^-W)
748 | // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW |
749 | |
750 | // Then like for the half-width iterations: |
751 | // With 0 <= eps1, eps2 < 2^-W |
752 | // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b |
// abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8) ]
754 | // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ] |
755 | x_uq0 |
756 | } else { |
// C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n
758 | let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast(); |
759 | let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1); |
// E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32
761 | x_uq0 |
762 | }; |
763 | |
764 | let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { |
765 | for _ in 0..NUMBER_OF_FULL_ITERATIONS { |
766 | let corr_uq1: u64 = 0.wrapping_sub( |
767 | (CastInto::<u64>::cast(x_uq0) * (CastInto::<u64>::cast(b_uq1))) >> F::BITS, |
768 | ); |
769 | x_uq0 = ((((CastInto::<u64>::cast(x_uq0) as u128) * (corr_uq1 as u128)) |
770 | >> (F::BITS - 1)) as u64) |
771 | .cast(); |
772 | } |
773 | x_uq0 |
774 | } else { |
775 | // not using native full iterations |
776 | x_uq0 |
777 | }; |
778 | |
779 | // Finally, account for possible overflow, as explained above. |
780 | x_uq0 = x_uq0.wrapping_sub(2.cast()); |
781 | |
782 | // u_n for different precisions (with N-1 half-width iterations): |
783 | // W0 is the precision of C |
784 | // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW |
785 | |
786 | // Estimated with bc: |
787 | // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } |
788 | // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } |
789 | // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } |
790 | // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } |
791 | |
//             | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
// u_0         | < 184224974 | < 2812.1    | < 184224974 | < 791240234244348797
// u_1         | < 15804007  | < 242.7     | < 15804007  | < 67877681371350440
// u_2         | < 116308    | < 2.81      | < 116308    | < 499533100252317
// u_3         | < 7.31      |             | < 7.31      | < 27054456580
// u_4         |             |             |             | < 80.4
// Final (U_N) | same as u_3 | < 72        | < 218       | < 13920
799 | |
800 | // Add 2 to U_N due to final decrement. |
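// For f64 with 3 half-width + 1 full-width iterations, U_N < 218 (see the
// table above), and 218 + 2 = 220, which is the constant below.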
801 | |
802 | let reciprocal_precision: <F as Float>::Int = 220.cast(); |
803 | |
804 | // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W |
805 | let x_uq0 = x_uq0 - reciprocal_precision; |
806 | // Now 1/b - (2*P) * 2^-W < x < 1/b |
807 | // FIXME Is x_UQ0 still >= 0.5? |
808 | |
809 | let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi(); |
810 | // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W). |
811 | |
812 | // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), |
813 | // adjust it to be in [1.0, 2.0) as UQ1.SB. |
814 | let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { |
815 | // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, |
816 | // effectively doubling its value as well as its error estimation. |
817 | let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( |
818 | (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand))) |
819 | .cast(), |
820 | ); |
821 | a_significand <<= 1; |
822 | (residual_lo, written_exponent.wrapping_sub(1)) |
823 | } else { |
824 | // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it |
825 | // to UQ1.SB by right shifting by 1. Least significant bit is omitted. |
826 | quotient >>= 1; |
827 | let residual_lo = (a_significand << significand_bits).wrapping_sub( |
828 | (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand))) |
829 | .cast(), |
830 | ); |
831 | (residual_lo, written_exponent) |
832 | }; |
833 | |
// drop mutability
835 | let quotient = quotient; |
836 | |
// NB: residual_lo is calculated above for the normal result case.
// It is re-computed on the denormal path, which is not expected to be
// performance-sensitive.
840 | |
841 | // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB |
842 | // Each NextAfter() increments the floating point value by at least 2^-SB |
843 | // (more, if exponent was incremented). |
844 | // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint): |
845 | // q |
846 | // | | * | | | | | |
847 | // <---> 2^t |
848 | // | | | | | * | | |
849 | // q |
850 | // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB. |
851 | // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB |
852 | // (8*P) * 2^-W < 0.5 * 2^-SB |
853 | // P < 2^(W-4-SB) |
854 | // Generally, for at most R NextAfter() to be enough, |
855 | // P < (2*R - 1) * 2^(W-4-SB) |
856 | // For f32 (0+3): 10 < 32 (OK) |
857 | // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required |
858 | // For f64: 220 < 256 (OK) |
859 | // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) |
860 | |
861 | // If we have overflowed the exponent, return infinity |
862 | if written_exponent >= max_exponent as i64 { |
863 | return F::from_repr(inf_rep | quotient_sign); |
864 | } |
865 | |
866 | // Now, quotient <= the correctly-rounded result |
867 | // and may need taking NextAfter() up to 3 times (see error estimates above) |
868 | // r = a - b * q |
869 | let abs_result = if written_exponent > 0 { |
870 | let mut ret = quotient & significand_mask; |
871 | ret |= ((written_exponent as u64) << significand_bits).cast(); |
872 | residual <<= 1; |
873 | ret |
874 | } else { |
875 | if (significand_bits as i64 + written_exponent) < 0 { |
876 | return F::from_repr(quotient_sign); |
877 | } |
878 | let ret = |
879 | quotient.wrapping_shr((negate_u64(CastInto::<u64>::cast(written_exponent)) + 1) as u32); |
880 | residual = (CastInto::<u64>::cast( |
881 | a_significand.wrapping_shl( |
882 | significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)), |
883 | ), |
884 | ) |
885 | .wrapping_sub( |
886 | (CastInto::<u64>::cast(ret).wrapping_mul(CastInto::<u64>::cast(b_significand))) << 1, |
887 | )) |
888 | .cast(); |
889 | ret |
890 | }; |
891 | // Round |
892 | let abs_result = { |
893 | residual += abs_result & one; // tie to even |
894 | // conditionally turns the below LT comparison into LTE |
895 | if residual > b_significand { |
896 | abs_result + one |
897 | } else { |
898 | abs_result |
899 | } |
900 | }; |
901 | F::from_repr(abs_result | quotient_sign) |
902 | } |
903 | |
904 | intrinsics! { |
905 | #[avr_skip] |
906 | #[arm_aeabi_alias = __aeabi_fdiv] |
907 | pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 { |
908 | div32(a, b) |
909 | } |
910 | |
911 | #[avr_skip] |
912 | #[arm_aeabi_alias = __aeabi_ddiv] |
913 | pub extern "C" fn __divdf3(a: f64, b: f64) -> f64 { |
914 | div64(a, b) |
915 | } |
916 | |
#[cfg(target_arch = "arm")]
918 | pub extern "C" fn __divsf3vfp(a: f32, b: f32) -> f32 { |
919 | a / b |
920 | } |
921 | |
#[cfg(target_arch = "arm")]
923 | pub extern "C" fn __divdf3vfp(a: f64, b: f64) -> f64 { |
924 | a / b |
925 | } |
926 | } |
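
// A minimal sanity-check sketch (illustrative only; it assumes the
// `intrinsics!` macro above exposes `__divsf3`/`__divdf3` as ordinary
// functions in this module and that a test harness is available when
// building with `cfg(test)`; the crate's full tests for these intrinsics
// normally live outside this file).
#[cfg(test)]
mod sanity {
    use super::*;

    #[test]
    fn divides_simple_values() {
        // Exactly representable results avoid any rounding concerns.
        assert_eq!(__divsf3(6.0, 3.0), 2.0);
        assert_eq!(__divdf3(1.0, 4.0), 0.25);
        // Special cases handled by the early returns at the top of div32/div64.
        assert!(__divdf3(1.0, 0.0).is_infinite());
        assert!(__divdf3(0.0, 0.0).is_nan());
    }
}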
927 | |