//===-- Double-precision asin function ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "src/math/asin.h"
#include "asin_utils.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/sqrt.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA

namespace LIBC_NAMESPACE_DECL {

using DoubleDouble = fputil::DoubleDouble;
using Float128 = fputil::DyadicFloat<128>;

LLVM_LIBC_FUNCTION(double, asin, (double x)) {
  using FPBits = fputil::FPBits<double>;

  FPBits xbits(x);
  int x_exp = xbits.get_biased_exponent();

  // |x| < 0.5.
  if (x_exp < FPBits::EXP_BIAS - 1) {
    // |x| < 2^-26.
    if (LIBC_UNLIKELY(x_exp < FPBits::EXP_BIAS - 26)) {
      // When |x| < 2^-26, the relative error of the approximation asin(x) ~ x
      // is:
      //   |asin(x) - x| / |asin(x)| < |x^3| / (6|x|)
      //                             = x^2 / 6
      //                             < 2^-54
      //                             < epsilon(1)/2.
      // Since |asin(x)| > |x|, the correctly rounded values of asin(x) are:
      //   = x + sign(x)*eps(x) if (rounding mode = FE_UPWARD and x is
      //                        positive), or (rounding mode = FE_DOWNWARD and
      //                        x is negative),
      //   = x otherwise.
      // To simplify the rounding decision and make it more efficient, we use
      // fma(x, 2^-54, x) instead.
      // Note: to use the formula x + 2^-54*x to decide the correct rounding,
      // we do need fma(x, 2^-54, x) to prevent underflow caused by 2^-54*x
      // when |x| < 2^-1022. For targets without FMA instructions, when x is
      // close to the denormal range, we normalize x first, perform the
      // computation scaled up by 2^54, and then scale the result back down,
      // removing the normalization constant at the end.
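      // For example, with x = 2^-30, fma(x, 2^-54, x) computes x*(1 + 2^-54),
      // whose exact value lies strictly between x and x + ulp(x).  Rounding to
      // nearest or toward zero returns x, while rounding upward returns
      // x + ulp(x), matching the correctly rounded asin(x) = x + x^3/6 + ...
      // in each mode, and the inexact exception is raised as a side effect.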
#if defined(LIBC_MATH_HAS_SKIP_ACCURATE_PASS)
      return x;
#elif defined(LIBC_TARGET_CPU_HAS_FMA_DOUBLE)
      return fputil::multiply_add(x, 0x1.0p-54, x);
#else
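      // Without hardware FMA, the term 2^-54*x in (x + 2^-54*x) can be lost
      // to underflow.  The code below therefore works on a scaled value
      // (x + normalize_const) * 2^54, where normalize_const = sign(x) *
      // min_normal lifts a subnormal x into the normal range; adding eps =
      // sign(x) * min_normal to the scaled value then acts as an
      // away-from-zero nudge of less than half an ulp, and the final
      // multiply_add scales the result back down and removes the
      // normalization constant.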
      if (xbits.abs().uintval() == 0)
        return x;
      // Get sign(x) * min_normal.
      FPBits eps_bits = FPBits::min_normal();
      eps_bits.set_sign(xbits.sign());
      double eps = eps_bits.get_val();
      double normalize_const = (x_exp == 0) ? eps : 0.0;
      double scaled_normal =
          fputil::multiply_add(x + normalize_const, 0x1.0p54, eps);
      return fputil::multiply_add(scaled_normal, 0x1.0p-54, -normalize_const);
#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS
    }

#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
    return x * asin_eval(x * x);
#else
    unsigned idx;
    DoubleDouble x_sq = fputil::exact_mult(x, x);
    double err = xbits.abs().get_val() * 0x1.0p-51;
    // Polynomial approximation:
    //   p ~ asin(x)/x
    DoubleDouble p = asin_eval(x_sq, idx, err);
    // asin(x) ~ x * (ASIN_COEFFS[idx][0] + p)
    DoubleDouble r0 = fputil::exact_mult(x, p.hi);
    double r_lo = fputil::multiply_add(x, p.lo, r0.lo);
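    // Now r0.hi + r_lo ~ x * (p.hi + p.lo): the rounding error of x * p.hi
    // (r0.lo) and the small cross term x * p.lo are both folded into r_lo.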

    // Ziv's accuracy test.
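    // err (initialized to |x| * 2^-51 above, and possibly adjusted by
    // asin_eval) bounds the total approximation and rounding error of
    // r0.hi + r_lo.  If adding +err and -err still rounds to the same double,
    // that double is the correctly rounded result in the current rounding
    // mode; otherwise we fall back to the slower 128-bit pass below.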

    double r_upper = r0.hi + (r_lo + err);
    double r_lower = r0.hi + (r_lo - err);

    if (LIBC_LIKELY(r_upper == r_lower))
      return r_upper;

    // Ziv's accuracy test failed, perform 128-bit calculation.

    // Recalculate mod 1/64.
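    // idx is the nearest integer to 64 * x^2, so the reduced argument
    // u = x^2 - idx/64 computed below satisfies |u| <= 1/128, presumably a
    // small evaluation point for the idx-th polynomial in asin_utils.h.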
    idx = static_cast<unsigned>(fputil::nearest_integer(x_sq.hi * 0x1.0p6));

    // Get x^2 - idx/64 exactly. When FMA is available, double-double
    // multiplication will be correct for all rounding modes. Otherwise we use
    // Float128 directly.
    Float128 x_f128(x);

#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
    // u = x^2 - idx/64
    Float128 u_hi(
        fputil::multiply_add(static_cast<double>(idx), -0x1.0p-6, x_sq.hi));
    Float128 u = fputil::quick_add(u_hi, Float128(x_sq.lo));
#else
    Float128 x_sq_f128 = fputil::quick_mul(x_f128, x_f128);
    Float128 u = fputil::quick_add(
        x_sq_f128, Float128(static_cast<double>(idx) * (-0x1.0p-6)));
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE

    Float128 p_f128 = asin_eval(u, idx);
    Float128 r = fputil::quick_mul(x_f128, p_f128);

    return static_cast<double>(r);
#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS
  }
  // |x| >= 0.5

  double x_abs = xbits.abs().get_val();

  // Maintaining the sign:
  constexpr double SIGN[2] = {1.0, -1.0};
  double x_sign = SIGN[xbits.is_neg()];

  // |x| >= 1
  if (LIBC_UNLIKELY(x_exp >= FPBits::EXP_BIAS)) {
    // x = +-1, asin(x) = +- pi/2
    if (x_abs == 1.0) {
      // return +- pi/2
      return fputil::multiply_add(x_sign, PI_OVER_TWO.hi,
                                  x_sign * PI_OVER_TWO.lo);
    }
    // |x| > 1, return NaN.
    if (xbits.is_quiet_nan())
      return x;

    // Set domain error for non-NaN input.
    if (!xbits.is_nan())
      fputil::set_errno_if_required(EDOM);

    fputil::raise_except_if_required(FE_INVALID);
    return FPBits::quiet_nan().get_val();
  }

  // When |x| >= 0.5, we perform the range reduction as follows:
  //
  // Assume further that 0.5 <= x < 1, and let:
  //   y = asin(x)
  // We will use the double angle formula:
  //   cos(2y) = 1 - 2 sin^2(y)
  // and the complement angle identity:
  //   x = sin(y) = cos(pi/2 - y)
  //             = 1 - 2 sin^2(pi/4 - y/2)
  // So:
  //   sin(pi/4 - y/2) = sqrt( (1 - x)/2 )
  // And hence:
  //   pi/4 - y/2 = asin( sqrt( (1 - x)/2 ) )
  // Equivalently:
  //   asin(x) = y = pi/2 - 2 * asin( sqrt( (1 - x)/2 ) )
  // Let u = (1 - x)/2, then:
  //   asin(x) = pi/2 - 2 * asin( sqrt(u) )
  // Moreover, since 0.5 <= x < 1:
  //   0 < u <= 1/4, and 0 < sqrt(u) <= 0.5,
  // and hence we can reuse the same polynomial approximation P(t^2) ~
  // asin(t)/t that is used when |x| <= 0.5, with t = sqrt(u):
  //   asin(x) ~ pi/2 - 2 * sqrt(u) * P(u).
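  //
  // For a concrete check: x = 0.75 gives u = 0.125, sqrt(u) ~ 0.3535534 and
  // asin(sqrt(u)) ~ 0.3613671, so
  //   asin(0.75) ~ pi/2 - 2 * 0.3613671 ~ 0.8480621,
  // which matches asin(0.75) evaluated directly.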

  // u = (1 - |x|)/2
  double u = fputil::multiply_add(x_abs, -0.5, 0.5);
  // v_hi + v_lo ~ sqrt(u).
  // Let:
  //   h = u - v_hi^2 = (sqrt(u) - v_hi) * (sqrt(u) + v_hi)
  // Then:
  //   sqrt(u) = v_hi + h / (sqrt(u) + v_hi)
  //           ~ v_hi + h / (2 * v_hi)
  // So we can use:
  //   v_lo = h / (2 * v_hi).
  // Then,
  //   asin(x) ~ pi/2 - 2*(v_hi + v_lo) * P(u)
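  // The term neglected by this first correction is
  //   -h^2 / (2 * v_hi * (sqrt(u) + v_hi)^2) = O(h^2);
  // it only matters when the accurate pass below is taken, where a further
  // correction term v_ll is added.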
  double v_hi = fputil::sqrt<double>(u);

#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
  double p = asin_eval(u);
  double r = x_sign * fputil::multiply_add(-2.0 * v_hi, p, PI_OVER_TWO.hi);
  return r;
#else

#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
  double h = fputil::multiply_add(v_hi, -v_hi, u);
#else
  DoubleDouble v_hi_sq = fputil::exact_mult(v_hi, v_hi);
  double h = (u - v_hi_sq.hi) - v_hi_sq.lo;
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE

  // Scale v_lo and v_hi by 2 from the formula:
  //   vh = v_hi * 2
  //   vl = 2*v_lo = h / v_hi.
  double vh = v_hi * 2.0;
  double vl = h / v_hi;

  // Polynomial approximation:
  //   p ~ asin(sqrt(u))/sqrt(u)
  unsigned idx;
  double err = vh * 0x1.0p-51;

  DoubleDouble p = asin_eval(DoubleDouble{0.0, u}, idx, err);
  // Perform computations in double-double arithmetic:
  //   asin(x) = pi/2 - (vh + vl) * (ASIN_COEFFS[idx][0] + p)
  DoubleDouble r0 = fputil::quick_mult(DoubleDouble{vl, vh}, p);
  DoubleDouble r = fputil::exact_add(PI_OVER_TWO.hi, -r0.hi);

  double r_lo = PI_OVER_TWO.lo - r0.lo + r.lo;
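  // Here exact_add captures the rounding error of PI_OVER_TWO.hi - r0.hi in
  // r.lo, and the remaining low parts (PI_OVER_TWO.lo, -r0.lo, r.lo) are
  // collected in r_lo, so that r.hi + r_lo ~ pi/2 - (vh + vl) * p.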

  // Ziv's accuracy test.

#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
  double r_upper = fputil::multiply_add(
      r.hi, x_sign, fputil::multiply_add(r_lo, x_sign, err));
  double r_lower = fputil::multiply_add(
      r.hi, x_sign, fputil::multiply_add(r_lo, x_sign, -err));
#else
  r_lo *= x_sign;
  r.hi *= x_sign;
  double r_upper = r.hi + (r_lo + err);
  double r_lower = r.hi + (r_lo - err);
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE

  if (LIBC_LIKELY(r_upper == r_lower))
    return r_upper;

  // Ziv's accuracy test failed, we redo the computations in Float128.
  // Recalculate mod 1/64.
  idx = static_cast<unsigned>(fputil::nearest_integer(u * 0x1.0p6));
  // After the first step of Newton-Raphson approximating v = sqrt(u), we have
  // that:
  //   sqrt(u) = v_hi + h / (sqrt(u) + v_hi)
  //   v_lo = h / (2 * v_hi)
  // with error:
  //   sqrt(u) - (v_hi + v_lo) = h * ( 1/(sqrt(u) + v_hi) - 1/(2*v_hi) )
  //                           = -h^2 / (2*v_hi * (sqrt(u) + v_hi)^2).
  // Since:
  //   (sqrt(u) + v_hi)^2 ~ (2*sqrt(u))^2 = 4u,
  // we can add another correction term to (v_hi + v_lo) that is:
  //   v_ll = -h^2 / (2*v_hi * 4u)
  //        = -v_lo * (h / 4u)
  //        = -vl * (h / 8u),
  // making the error:
  //   sqrt(u) - (v_hi + v_lo + v_ll) = O(h^3),
  // well below the 128-bit precision needed.
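  // Since v_hi is sqrt(u) rounded to double precision, |h| = |u - v_hi^2| is
  // roughly at most 2^-52 * u, so the remaining O(h^3) error has relative
  // size on the order of (h/u)^3 <~ 2^-156, negligible at the 128-bit
  // precision used below.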

  // Get the rounding error of vl = 2 * v_lo ~ h / v_hi.
  // Get the full product of v_hi * vl.
#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
  double vl_lo = fputil::multiply_add(-v_hi, vl, h) / v_hi;
#else
  DoubleDouble vh_vl = fputil::exact_mult(v_hi, vl);
  double vl_lo = ((h - vh_vl.hi) - vh_vl.lo) / v_hi;
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
  // vll = 2*v_ll + (rounding error of vl) = -vl * (h / (4u)) + vl_lo.
  double t = h * (-0.25) / u;
  double vll = fputil::multiply_add(vl, t, vl_lo);
  // m_v = -(vh + vl + vll) = -2*(v_hi + v_lo + v_ll).
  Float128 m_v = fputil::quick_add(
      Float128(vh), fputil::quick_add(Float128(vl), Float128(vll)));
  m_v.sign = Sign::NEG;

  // Perform computations in Float128:
  //   asin(|x|) = pi/2 - (vh + vl + vll) * P(u),
  // with the sign of x applied at the end.
  Float128 y_f128(fputil::multiply_add(static_cast<double>(idx), -0x1.0p-6, u));

  Float128 p_f128 = asin_eval(y_f128, idx);
  Float128 r0_f128 = fputil::quick_mul(m_v, p_f128);
  Float128 r_f128 = fputil::quick_add(PI_OVER_TWO_F128, r0_f128);

  if (xbits.is_neg())
    r_f128.sign = Sign::NEG;

  return static_cast<double>(r_f128);
#endif // LIBC_MATH_HAS_SKIP_ACCURATE_PASS
}

} // namespace LIBC_NAMESPACE_DECL

source code of libc/src/math/generic/asin.cpp