//===-- Double-precision e^x - 1 function ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "src/math/expm1.h"
#include "common_constants.h" // Lookup tables EXP_M1 and EXP_M2.
#include "explogxf.h"         // ziv_test_denorm.
#include "src/__support/CPP/bit.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/nearest_integer.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/FPUtil/triple_double.h"
#include "src/__support/common.h"
#include "src/__support/integer_literals.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY

#include <errno.h>

// #define DEBUGDEBUG

#ifdef DEBUGDEBUG
#include <iomanip>
#include <iostream>
#endif

namespace LIBC_NAMESPACE {

using fputil::DoubleDouble;
using fputil::TripleDouble;
using Float128 = typename fputil::DyadicFloat<128>;

using LIBC_NAMESPACE::operator""_u128;

// log2(e)
constexpr double LOG2_E = 0x1.71547652b82fep+0;

// Error bounds:
// Errors when using double precision.
// 0x1.8p-63
constexpr uint64_t ERR_D = 0x3c08000000000000;
// Errors when using double-double precision.
// 0x1.0p-99
constexpr uint64_t ERR_DD = 0x39c0000000000000;
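// The bounds are kept as raw bit patterns so that they can be rescaled by
// 2^(-hi) simply by adding (-hi) << 52 to the exponent field; see the
// computation of `err` in the main function below.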

// -2^-12 * log(2)
// > a = -2^-12 * log(2);
// > b = round(a, 30, RN);
// > c = round(a - b, 30, RN);
// > d = round(a - b - c, D, RN);
// Errors < 1.5 * 2^-133
constexpr double MLOG_2_EXP2_M12_HI = -0x1.62e42ffp-13;
constexpr double MLOG_2_EXP2_M12_MID = 0x1.718432a1b0e26p-47;
constexpr double MLOG_2_EXP2_M12_MID_30 = 0x1.718432ap-47;
constexpr double MLOG_2_EXP2_M12_LO = 0x1.b0e2633fe0685p-79;
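// The HI and MID_30 parts are rounded to 30 significant bits so that their
// products with kd (an integer with |kd| < 2^23, see the range reduction in
// the main function) need at most 23 + 30 = 53 significant bits, and hence
// are exact in double precision.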

namespace {

// Polynomial approximations with double precision:
// Return expm1(dx) / dx ~ 1 + dx / 2 + dx^2 / 6 + dx^3 / 24.
// For |dx| < 2^-13 + 2^-30:
//   | output - expm1(dx) / dx | < 2^-51.
LIBC_INLINE double poly_approx_d(double dx) {
  // dx^2
  double dx2 = dx * dx;
  // c0 = 1 + dx / 2
  double c0 = fputil::multiply_add(dx, 0.5, 1.0);
  // c1 = 1/6 + dx / 24
  double c1 =
      fputil::multiply_add(dx, 0x1.5555555555555p-5, 0x1.5555555555555p-3);
  // p = dx^2 * c1 + c0 = 1 + dx / 2 + dx^2 / 6 + dx^3 / 24
  double p = fputil::multiply_add(dx2, c1, c0);
  return p;
}

// Polynomial approximation with double-double precision:
// Return expm1(dx) / dx ~ 1 + dx / 2 + dx^2 / 6 + ... + dx^6 / 5040
// For |dx| < 2^-13 + 2^-30:
//   | output - expm1(dx) / dx | < 2^-101
DoubleDouble poly_approx_dd(const DoubleDouble &dx) {
  // Taylor polynomial.
  constexpr DoubleDouble COEFFS[] = {
      {0, 0x1p0},                                      // 1
      {0, 0x1p-1},                                     // 1/2
      {0x1.5555555555555p-57, 0x1.5555555555555p-3},   // 1/6
      {0x1.5555555555555p-59, 0x1.5555555555555p-5},   // 1/24
      {0x1.1111111111111p-63, 0x1.1111111111111p-7},   // 1/120
      {-0x1.f49f49f49f49fp-65, 0x1.6c16c16c16c17p-10}, // 1/720
      {0x1.a01a01a01a01ap-73, 0x1.a01a01a01a01ap-13},  // 1/5040
  };

  DoubleDouble p = fputil::polyeval(dx, COEFFS[0], COEFFS[1], COEFFS[2],
                                    COEFFS[3], COEFFS[4], COEFFS[5], COEFFS[6]);
  return p;
}

// Polynomial approximation with 128-bit precision:
// Return (exp(dx) - 1)/dx ~ 1 + dx / 2 + dx^2 / 6 + ... + dx^6 / 5040
// For |dx| < 2^-13 + 2^-30:
//   | output - (exp(dx) - 1) / dx | < 2^-126.
Float128 poly_approx_f128(const Float128 &dx) {
  constexpr Float128 COEFFS_128[]{
      {Sign::POS, -127, 0x80000000'00000000'00000000'00000000_u128}, // 1.0
      {Sign::POS, -128, 0x80000000'00000000'00000000'00000000_u128}, // 0.5
      {Sign::POS, -130, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/6
      {Sign::POS, -132, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/24
      {Sign::POS, -134, 0x88888888'88888888'88888888'88888889_u128}, // 1/120
      {Sign::POS, -137, 0xb60b60b6'0b60b60b'60b60b60'b60b60b6_u128}, // 1/720
      {Sign::POS, -140, 0xd00d00d0'0d00d00d'00d00d00'd00d00d0_u128}, // 1/5040
  };

  Float128 p = fputil::polyeval(dx, COEFFS_128[0], COEFFS_128[1], COEFFS_128[2],
                                COEFFS_128[3], COEFFS_128[4], COEFFS_128[5],
                                COEFFS_128[6]);
  return p;
}

#ifdef DEBUGDEBUG
std::ostream &operator<<(std::ostream &OS, const Float128 &r) {
  OS << (r.sign ? "-(" : "(") << r.mantissa.val[0] << " + " << r.mantissa.val[1]
     << " * 2^64) * 2^" << r.exponent << "\n";
  return OS;
}

std::ostream &operator<<(std::ostream &OS, const DoubleDouble &r) {
  OS << std::hexfloat << r.hi << " + " << r.lo << std::defaultfloat << "\n";
  return OS;
}
#endif

// Compute exp(x) - 1 using 128-bit precision.
// TODO(lntue): investigate triple-double precision implementation for this
// step.
Float128 expm1_f128(double x, double kd, int idx1, int idx2) {
  // Recalculate dx:

  double t1 = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double t2 = kd * MLOG_2_EXP2_M12_MID_30;                     // exact
  double t3 = kd * MLOG_2_EXP2_M12_LO;                         // Error < 2^-133

  Float128 dx = fputil::quick_add(
      Float128(t1), fputil::quick_add(Float128(t2), Float128(t3)));

  // TODO: Skip recalculating exp_mid1 and exp_mid2.
  Float128 exp_mid1 =
      fputil::quick_add(Float128(EXP2_MID1[idx1].hi),
                        fputil::quick_add(Float128(EXP2_MID1[idx1].mid),
                                          Float128(EXP2_MID1[idx1].lo)));

  Float128 exp_mid2 =
      fputil::quick_add(Float128(EXP2_MID2[idx2].hi),
                        fputil::quick_add(Float128(EXP2_MID2[idx2].mid),
                                          Float128(EXP2_MID2[idx2].lo)));

  Float128 exp_mid = fputil::quick_mul(exp_mid1, exp_mid2);

  int hi = static_cast<int>(kd) >> 12;
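  // minus_one = -2^(-hi) in 128-bit precision, so that after the final scaling
  // by 2^hi (r.exponent += hi below) it contributes exactly -1 to the result.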
  Float128 minus_one{Sign::NEG, -127 - hi,
                     0x80000000'00000000'00000000'00000000_u128};

  Float128 exp_mid_m1 = fputil::quick_add(exp_mid, minus_one);

  Float128 p = poly_approx_f128(dx);

  // r = exp_mid * (1 + dx * P) - 1
  //   = (exp_mid - 1) + (dx * exp_mid) * P
  Float128 r =
      fputil::multiply_add(fputil::quick_mul(exp_mid, dx), p, exp_mid_m1);

  r.exponent += hi;

#ifdef DEBUGDEBUG
  std::cout << "=== VERY SLOW PASS ===\n"
            << " kd: " << kd << "\n"
            << " dx: " << dx << "exp_mid_m1: " << exp_mid_m1
            << " exp_mid: " << exp_mid << " p: " << p
            << " r: " << r << std::endl;
#endif

  return r;
}

// Compute exp(x) - 1 with double-double precision.
DoubleDouble exp_double_double(double x, double kd, const DoubleDouble &exp_mid,
                               const DoubleDouble &hi_part) {
  // Recalculate dx:
  //   dx = x - k * 2^-12 * log(2)
  double t1 = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double t2 = kd * MLOG_2_EXP2_M12_MID_30;                     // exact
  double t3 = kd * MLOG_2_EXP2_M12_LO;                         // Error < 2^-130

  DoubleDouble dx = fputil::exact_add(t1, t2);
  dx.lo += t3;

  // Degree-6 Taylor polynomial approximation in double-double precision.
  // | p - exp(x) | < 2^-100.
  DoubleDouble p = poly_approx_dd(dx);

  // Error bounds: 2^-99.
  DoubleDouble r =
      fputil::multiply_add(fputil::quick_mult(exp_mid, dx), p, hi_part);

#ifdef DEBUGDEBUG
  std::cout << "=== SLOW PASS ===\n"
            << " dx: " << dx << " p: " << p << " r: " << r << std::endl;
#endif

  return r;
}

// Check for exceptional cases when
// |x| <= 2^-53 or x < log(2^-54) or x >= 0x1.62e42fefa39fp+9
double set_exceptional(double x) {
  using FPBits = typename fputil::FPBits<double>;
  FPBits xbits(x);

  uint64_t x_u = xbits.uintval();
  uint64_t x_abs = xbits.abs().uintval();

  // |x| <= 2^-53.
  if (x_abs <= 0x3ca0'0000'0000'0000ULL) {
    // expm1(x) ~ x.

    if (LIBC_UNLIKELY(x_abs <= 0x0370'0000'0000'0000ULL)) {
      if (LIBC_UNLIKELY(x_abs == 0))
        return x;
      // |x| <= 2^-968, need to scale up a bit before rounding, then scale it
      // back down.
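      // Scaling x up by 2^200 makes the small positive perturbation 2^-1022
      // meaningful relative to the product: it forces the inexact exception
      // and nudges directed rounding toward the positive x^2/2 + ... tail of
      // expm1(x); the final multiplication by 2^-200 then scales the result
      // back, raising underflow as well when the result is subnormal.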
      return 0x1.0p-200 * fputil::multiply_add(x, 0x1.0p+200, 0x1.0p-1022);
    }

    // 2^-968 < |x| <= 2^-53.
    return fputil::round_result_slightly_up(x);
  }

  // x < log(2^-54) or x >= 0x1.62e42fefa39fp+9 or inf/nan.

  // x < log(2^-54) or -inf/nan
  if (x_u >= 0xc042'b708'8723'20e2ULL) {
    // expm1(-Inf) = -1
    if (xbits.is_inf())
      return -1.0;

    // exp(nan) = nan
    if (xbits.is_nan())
      return x;

    return fputil::round_result_slightly_up(-1.0);
  }

  // x >= round(log(MAX_NORMAL), D, RU) = 0x1.62e42fefa39fp+9 or +inf/nan
  // x is finite
  if (x_u < 0x7ff0'0000'0000'0000ULL) {
    int rounding = fputil::quick_get_round();
    if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO)
      return FPBits::max_normal().get_val();

    fputil::set_errno_if_required(ERANGE);
    fputil::raise_except_if_required(FE_OVERFLOW);
  }
  // x is +inf or nan
  return x + FPBits::inf().get_val();
}

} // namespace

LLVM_LIBC_FUNCTION(double, expm1, (double x)) {
  using FPBits = typename fputil::FPBits<double>;

  FPBits xbits(x);

  bool x_is_neg = xbits.is_neg();
  uint64_t x_u = xbits.uintval();

  // Upper bound: max normal number = 2^1023 * (2 - 2^-52)
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RU) = 0x1.62e42fefa39fp+9
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RD) = 0x1.62e42fefa39efp+9
  // > round(log (2^1023 ( 2 - 2^-52 )), D, RN) = 0x1.62e42fefa39efp+9
  // > round(exp(0x1.62e42fefa39fp+9), D, RN) = infty

  // Lower bound: log(2^-54) = -0x1.2b708872320e2p5
  // > round(log(2^-54), D, RN) = -0x1.2b708872320e2p5

  // x < log(2^-54) or x >= 0x1.62e42fefa39fp+9 or |x| <= 2^-53.

  if (LIBC_UNLIKELY(x_u >= 0xc042b708872320e2 ||
                    (x_u <= 0xbca0000000000000 && x_u >= 0x40862e42fefa39f0) ||
                    x_u <= 0x3ca0000000000000)) {
    return set_exceptional(x);
  }

  // Now log(2^-54) <= x <= -2^-53 or 2^-53 <= x < log(2^1023 * (2 - 2^-52))

  // Range reduction:
  // Let x = log(2) * (hi + mid1 + mid2) + lo
  // in which:
  //   hi is an integer
  //   mid1 * 2^6 is an integer
  //   mid2 * 2^12 is an integer
  // then:
  //   exp(x) = 2^hi * 2^(mid1) * 2^(mid2) * exp(lo).
  // With this formula:
  //   - multiplying by 2^hi is exact and cheap, simply by adding the exponent
  //     field.
  //   - 2^(mid1) and 2^(mid2) are stored in 2 x 64-element tables.
  //   - exp(lo) ~ 1 + lo + a0 * lo^2 + ...
  //
  // They can be defined by:
  //   hi + mid1 + mid2 = 2^(-12) * round(2^12 * log_2(e) * x)
  // If we store L2E = round(log2(e), D, RN), then:
  //   log2(e) - L2E ~ 1.5 * 2^(-56)
  // So the errors when computing in double precision are:
  //   | x * 2^12 * log_2(e) - D(x * 2^12 * L2E) | <=
  //  <= | x * 2^12 * log_2(e) - x * 2^12 * L2E | +
  //     + | x * 2^12 * L2E - D(x * 2^12 * L2E) |
  //  <= 2^12 * ( |x| * 1.5 * 2^-56 + eps(x))  for RN
  //     2^12 * ( |x| * 1.5 * 2^-56 + 2*eps(x)) for other rounding modes.
  // So if:
  //   hi + mid1 + mid2 = 2^(-12) * round(x * 2^12 * L2E) is computed entirely
  // in double precision, the reduced argument:
  //   lo = x - log(2) * (hi + mid1 + mid2) is bounded by:
  //   |lo| <= 2^-13 + (|x| * 1.5 * 2^-56 + 2*eps(x))
  //         < 2^-13 + (1.5 * 2^9 * 1.5 * 2^-56 + 2*2^(9 - 52))
  //         < 2^-13 + 2^-41
  //
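  // For a rough illustration, with x = 1.0: round(2^12 * log2(e) * x) = 5909,
  // so hi = 5909 >> 12 = 1, mid1 = 28/2^6, mid2 = 21/2^12, and
  //   lo = 1.0 - 5909 * 2^-12 * log(2) ~ 4.7e-5,
  // well within the bound above, giving
  //   expm1(1.0) = 2^1 * 2^(28/64) * 2^(21/4096) * exp(lo) - 1.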

  // The following trick computes the round(x * L2E) more efficiently
  // than using the rounding instructions, with the tradeoff for less accuracy,
  // and hence a slightly larger range for the reduced argument `lo`.
  //
  // To be precise, since |x| < |log(2^-1075)| < 1.5 * 2^9,
  //   |x * 2^12 * L2E| < 1.5 * 2^9 * 2^12 * 1.5 < 2^23,
  // So we can fit the rounded result round(x * 2^12 * L2E) in int32_t.
  // Thus, the goal is to be able to use an additional addition and fixed width
  // shift to get an int32_t representing round(x * 2^12 * L2E).
  //
  // Assuming int32_t uses 2's complement representation, since the mantissa
  // part of a double precision number is unsigned with the leading bit hidden,
  // if we add an extra constant C = 2^e1 + 2^e2 with e1 > e2 >= 25 to the
  // product, the part that is < 2^e2 in the resulting mantissa of
  // (x*2^12*L2E + C) can be considered as a proper 2's complement
  // representation of x*2^12*L2E.
  //
  // One small problem with this approach is that the sum (x*2^12*L2E + C) in
  // double precision is rounded to the least significant bit of the dominant
  // factor C. In order to minimize the rounding errors from this addition, we
  // want to minimize e1. Another constraint that we want is that after
  // shifting the mantissa so that the least significant bit of int32_t
  // corresponds to the unit bit of (x*2^12*L2E), the sign is correct without
  // any adjustment. So combining these 2 requirements, we can choose
  // C = 2^33 + 2^32, so that the sign bit corresponds to the 2^31 bit, and
  // hence after right shifting the mantissa, the resulting int32_t has the
  // correct sign. With this choice of C, the number of mantissa bits we need
  // to shift to the right is: 52 - 33 = 19.
  //
  // Moreover, since the integer right shifts are equivalent to rounding down,
  // we can add an extra 0.5 so that it will become round-to-nearest, ties
  // toward +infinity. So in particular, we can compute:
  //   hmm = x * 2^12 * L2E + C,
  // where C = 2^33 + 2^32 + 2^-1, then if
  //   k = int32_t(lower 51 bits of double(x * 2^12 * L2E + C) >> 19),
  // the reduced argument:
  //   lo = x - log(2) * 2^-12 * k is bounded by:
  //   |lo| <= 2^-13 + 2^-41 + 2^-12*2^-19
  //         = 2^-13 + 2^-31 + 2^-41.
  //
  // Finally, notice that k only uses the mantissa of x * 2^12 * L2E, so the
  // factor 2^12 is not needed. So we can simply define
  //   C = 2^(33 - 12) + 2^(32 - 12) + 2^(-1 - 12), and
  //   k = int32_t(lower 51 bits of double(x * L2E + C) >> 19).
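  //
  // The fused multiply-add below implements this trick; it plays the role of
  // something like
  //   int k = static_cast<int>(fputil::nearest_integer(x * 0x1.0p12 * LOG2_E));
  // but uses only one FMA and a bit shift, at the cost of the slightly larger
  // error bound discussed above.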

  // Rounding errors <= 2^-31 + 2^-41.
  double tmp = fputil::multiply_add(x, LOG2_E, 0x1.8000'0000'4p21);
  int k = static_cast<int>(cpp::bit_cast<uint64_t>(tmp) >> 19);
  double kd = static_cast<double>(k);

  uint32_t idx1 = (k >> 6) & 0x3f;
  uint32_t idx2 = k & 0x3f;
  int hi = k >> 12;
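  // k = hi * 2^12 + idx1 * 2^6 + idx2, so that
  //   2^(k * 2^-12) = 2^hi * EXP2_MID1[idx1] * EXP2_MID2[idx2],
  // with the table entries approximating 2^(idx1 / 2^6) and 2^(idx2 / 2^12).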

  DoubleDouble exp_mid1{EXP2_MID1[idx1].mid, EXP2_MID1[idx1].hi};
  DoubleDouble exp_mid2{EXP2_MID2[idx2].mid, EXP2_MID2[idx2].hi};

  DoubleDouble exp_mid = fputil::quick_mult(exp_mid1, exp_mid2);

  // -2^(-hi)
  double one_scaled =
      FPBits::create_value(Sign::NEG, FPBits::EXP_BIAS - hi, 0).get_val();

  // 2^(mid1 + mid2) - 2^(-hi)
  DoubleDouble hi_part = x_is_neg ? fputil::exact_add(one_scaled, exp_mid.hi)
                                  : fputil::exact_add(exp_mid.hi, one_scaled);

  hi_part.lo += exp_mid.lo;

  // |x - (hi + mid1 + mid2) * log(2) - dx| < 2^11 * eps(M_LOG_2_EXP2_M12.lo)
  //                                        = 2^11 * 2^-13 * 2^-52
  //                                        = 2^-54.
  // |dx| < 2^-13 + 2^-30.
  double lo_h = fputil::multiply_add(kd, MLOG_2_EXP2_M12_HI, x); // exact
  double dx = fputil::multiply_add(kd, MLOG_2_EXP2_M12_MID, lo_h);

  // We use the degree-4 Taylor polynomial to approximate exp(lo):
  //   exp(lo) ~ 1 + lo + lo^2 / 2 + lo^3 / 6 + lo^4 / 24 = 1 + lo * P(lo)
  // So that the errors are bounded by:
  //   |P(lo) - expm1(lo)/lo| < |lo|^4 / 64 < 2^(-13 * 4) / 64 = 2^-58
  // Let P_ be an evaluation of P where all intermediate computations are in
  // double precision. Using either Horner's or Estrin's schemes, the evaluated
  // errors can be bounded by:
  //   |P_(dx) - P(dx)| < 2^-51
  //   => |dx * P_(dx) - expm1(lo) | < 1.5 * 2^-64
  //   => 2^(mid1 + mid2) * |dx * P_(dx) - expm1(lo)| < 1.5 * 2^-63.
  // Since we approximate
  //   2^(mid1 + mid2) ~ exp_mid.hi + exp_mid.lo,
  // We use the expression:
  //   (exp_mid.hi + exp_mid.lo) * (1 + dx * P_(dx)) ~
  //   ~ exp_mid.hi + (exp_mid.hi * dx * P_(dx) + exp_mid.lo)
  // with errors bounded by 1.5 * 2^-63.

  // Finally, we have the following approximation formula:
  //   expm1(x) = 2^hi * 2^(mid1 + mid2) * exp(lo) - 1
  //            = 2^hi * ( 2^(mid1 + mid2) * exp(lo) - 2^(-hi) )
  //            ~ 2^hi * ( (exp_mid.hi - 2^-hi) +
  //                       + (exp_mid.hi * dx * P_(dx) + exp_mid.lo))
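  //
  // In the code below, mid_lo = dx * exp_mid.hi, p ~ P_(dx),
  //   lo = p * mid_lo + hi_part.lo, and
  //   hi_part.hi + lo ~ (exp_mid.hi - 2^-hi) +
  //                     + (exp_mid.hi * dx * P_(dx) + exp_mid.lo),
  // which is then scaled by 2^hi via the exponent manipulation at the end.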

  double mid_lo = dx * exp_mid.hi;

  // Approximate expm1(dx)/dx ~ 1 + dx / 2 + dx^2 / 6 + dx^3 / 24.
  double p = poly_approx_d(dx);

  double lo = fputil::multiply_add(p, mid_lo, hi_part.lo);

  // TODO: The following line leaks encoding abstraction. Use FPBits methods
  // instead.
  uint64_t err = x_is_neg ? (static_cast<uint64_t>(-hi) << 52) : 0;

  double err_d = cpp::bit_cast<double>(ERR_D + err);

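  // Ziv's rounding test: evaluate the result with the error bound added and
  // subtracted; if both round to the same double, that value is unaffected by
  // the approximation error and can be returned directly.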
  double upper = hi_part.hi + (lo + err_d);
  double lower = hi_part.hi + (lo - err_d);

#ifdef DEBUGDEBUG
  std::cout << "=== FAST PASS ===\n"
            << " x: " << std::hexfloat << x << std::defaultfloat << "\n"
            << " k: " << k << "\n"
            << " idx1: " << idx1 << "\n"
            << " idx2: " << idx2 << "\n"
            << " hi: " << hi << "\n"
            << " dx: " << std::hexfloat << dx << std::defaultfloat << "\n"
            << "exp_mid: " << exp_mid << "hi_part: " << hi_part
            << " mid_lo: " << std::hexfloat << mid_lo << std::defaultfloat
            << "\n"
            << " p: " << std::hexfloat << p << std::defaultfloat << "\n"
            << " lo: " << std::hexfloat << lo << std::defaultfloat << "\n"
            << " upper: " << std::hexfloat << upper << std::defaultfloat
            << "\n"
            << " lower: " << std::hexfloat << lower << std::defaultfloat
            << "\n"
            << std::endl;
#endif

  if (LIBC_LIKELY(upper == lower)) {
    // to multiply by 2^hi, a fast way is to simply add hi to the exponent
    // field.
    int64_t exp_hi = static_cast<int64_t>(hi) << FPBits::FRACTION_LEN;
    double r = cpp::bit_cast<double>(exp_hi + cpp::bit_cast<int64_t>(upper));
    return r;
  }

  // Use double-double
  DoubleDouble r_dd = exp_double_double(x, kd, exp_mid, hi_part);

  double err_dd = cpp::bit_cast<double>(ERR_DD + err);

  double upper_dd = r_dd.hi + (r_dd.lo + err_dd);
  double lower_dd = r_dd.hi + (r_dd.lo - err_dd);

  if (LIBC_LIKELY(upper_dd == lower_dd)) {
    int64_t exp_hi = static_cast<int64_t>(hi) << FPBits::FRACTION_LEN;
    double r = cpp::bit_cast<double>(exp_hi + cpp::bit_cast<int64_t>(upper_dd));
    return r;
  }

  // Use 128-bit precision
  Float128 r_f128 = expm1_f128(x, kd, idx1, idx2);

  return static_cast<double>(r_f128);
}

} // namespace LIBC_NAMESPACE

