//===-- Double-precision tan function -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "src/math/tan.h"
#include "hdr/errno_macros.h"
#include "src/__support/FPUtil/FEnvImpl.h"
#include "src/__support/FPUtil/FPBits.h"
#include "src/__support/FPUtil/PolyEval.h"
#include "src/__support/FPUtil/double_double.h"
#include "src/__support/FPUtil/dyadic_float.h"
#include "src/__support/FPUtil/except_value_utils.h"
#include "src/__support/FPUtil/multiply_add.h"
#include "src/__support/FPUtil/rounding_mode.h"
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
#include "src/math/generic/range_reduction_double_common.h"

#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
#include "range_reduction_double_fma.h"
#else
#include "range_reduction_double_nofma.h"
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE

namespace LIBC_NAMESPACE_DECL {

using DoubleDouble = fputil::DoubleDouble;
using Float128 = typename fputil::DyadicFloat<128>;

namespace {

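// Compute tan(y) for the range-reduced argument y = u.hi + u.lo
// (|y| <= pi/256) as a double-double stored in `result`, and return an upper
// bound for the absolute error of that approximation.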
LIBC_INLINE double tan_eval(const DoubleDouble &u, DoubleDouble &result) {
  // Evaluate tan(y) = tan(x - k * (pi/128)).
  // We use the degree-9 Taylor approximation:
  //   tan(y) ~ P(y) = y + y^3/3 + 2*y^5/15 + 17*y^7/315 + 62*y^9/2835
  // Then the error is bounded by:
  //   |tan(y) - P(y)| < 2^-6 * |y|^11 < 2^-6 * 2^-66 = 2^-72.
  // For y ~ u_hi + u_lo, fully expanding the polynomial and dropping any terms
  // < ulp(u_hi^3) gives us:
  //   P(y) = y + y^3/3 + 2*y^5/15 + 17*y^7/315 + 62*y^9/2835 = ...
  //        ~ u_hi + u_hi^3 * (1/3 + u_hi^2 * (2/15 + u_hi^2 * (17/315 +
  //                 + u_hi^2 * 62/2835))) +
  //          + u_lo * (1 + u_hi^2 * (1 + u_hi^2 * 2/3))
  double u_hi_sq = u.hi * u.hi; // Error < ulp(u_hi^2) < 2^(-6 - 52) = 2^-58.
  // p1 ~ 17/315 + u_hi^2 * 62/2835.
  double p1 =
      fputil::multiply_add(u_hi_sq, 0x1.664f4882c10fap-6, 0x1.ba1ba1ba1ba1cp-5);
  // p2 ~ 1/3 + u_hi^2 * 2/15.
  double p2 =
      fputil::multiply_add(u_hi_sq, 0x1.1111111111111p-3, 0x1.5555555555555p-2);
  // q1 ~ 1 + u_hi^2 * 2/3.
  double q1 = fputil::multiply_add(u_hi_sq, 0x1.5555555555555p-1, 1.0);
  double u_hi_3 = u_hi_sq * u.hi;
  double u_hi_4 = u_hi_sq * u_hi_sq;
  // p3 ~ 1/3 + u_hi^2 * (2/15 + u_hi^2 * (17/315 + u_hi^2 * 62/2835))
  double p3 = fputil::multiply_add(u_hi_4, p1, p2);
  // q2 ~ 1 + u_hi^2 * (1 + u_hi^2 * 2/3)
  double q2 = fputil::multiply_add(u_hi_sq, q1, 1.0);
  double tan_lo = fputil::multiply_add(u_hi_3, p3, u.lo * q2);
  // Overall, |tan(y) - (u_hi + tan_lo)| < ulp(u_hi^3) <= 2^-71.
  // And the relative error is:
  //   |(tan(y) - (u_hi + tan_lo)) / tan(y)| <= 2*ulp(u_hi^2) < 2^-64
  result = fputil::exact_add(u.hi, tan_lo);
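  // Return an absolute error bound ~ |u_hi^3| * 2^-51 + 2^-102 (a few ulps of
  // u_hi^3), consistent with the error estimate above.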
  return fputil::multiply_add(fputil::FPBits<double>(u_hi_3).abs().get_val(),
                              0x1.0p-51, 0x1.0p-102);
}

#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
// Accurate evaluation of tan for small u.
[[maybe_unused]] Float128 tan_eval(const Float128 &u) {
  Float128 u_sq = fputil::quick_mul(u, u);

  // tan(x) ~ x + x^3/3 + x^5 * 2/15 + x^7 * 17/315 + x^9 * 62/2835 +
  //          + x^11 * 1382/155925 + x^13 * 21844/6081075 +
  //          + x^15 * 929569/638512875 + x^17 * 6404582/10854718875
  // Relative error < 2^-127 for |u| < pi/256.
  constexpr Float128 TAN_COEFFS[] = {
      {Sign::POS, -127, 0x80000000'00000000'00000000'00000000_u128}, // 1
      {Sign::POS, -129, 0xaaaaaaaa'aaaaaaaa'aaaaaaaa'aaaaaaab_u128}, // 1/3
      {Sign::POS, -130, 0x88888888'88888888'88888888'88888889_u128}, // 2/15
      {Sign::POS, -132, 0xdd0dd0dd'0dd0dd0d'd0dd0dd0'dd0dd0dd_u128}, // 17/315
      {Sign::POS, -133, 0xb327a441'6087cf99'6b5dd24e'ec0b327a_u128}, // 62/2835
      {Sign::POS, -134,
       0x91371aaf'3611e47a'da8e1cba'7d900eca_u128}, // 1382/155925
      {Sign::POS, -136,
       0xeb69e870'abeefdaf'e606d2e4'd1e65fbc_u128}, // 21844/6081075
      {Sign::POS, -137,
       0xbed1b229'5baf15b5'0ec9af45'a2619971_u128}, // 929569/638512875
      {Sign::POS, -138,
       0x9aac1240'1b3a2291'1b2ac7e3'e4627d0a_u128}, // 6404582/10854718875
  };

  return fputil::quick_mul(
      u, fputil::polyeval(u_sq, TAN_COEFFS[0], TAN_COEFFS[1], TAN_COEFFS[2],
                          TAN_COEFFS[3], TAN_COEFFS[4], TAN_COEFFS[5],
                          TAN_COEFFS[6], TAN_COEFFS[7], TAN_COEFFS[8]));
}

// Compute a / b = a * (1/b) for Float128.
// Start from an initial double-precision approximation q ~ 1/b, apply two
// Newton-Raphson iterations, then multiply by a.
[[maybe_unused]] Float128 newton_raphson_div(const Float128 &a, Float128 b,
                                             double q) {
  Float128 q0(q);
  constexpr Float128 TWO(2.0);
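  // Negate b so that quick_add(TWO, quick_mul(b, q)) computes (2 - b * q).
  // Each Newton-Raphson step for 1/b is then q_{n+1} = q_n * (2 - b * q_n).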
  b.sign = (b.sign == Sign::POS) ? Sign::NEG : Sign::POS;
  Float128 q1 =
      fputil::quick_mul(q0, fputil::quick_add(TWO, fputil::quick_mul(b, q0)));
  Float128 q2 =
      fputil::quick_mul(q1, fputil::quick_add(TWO, fputil::quick_mul(b, q1)));
  return fputil::quick_mul(a, q2);
}
#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS

} // anonymous namespace

LLVM_LIBC_FUNCTION(double, tan, (double x)) {
  using FPBits = typename fputil::FPBits<double>;
  FPBits xbits(x);

  uint16_t x_e = xbits.get_biased_exponent();

  DoubleDouble y;
  unsigned k;
  LargeRangeReduction range_reduction_large{};

  // |x| < 2^16
  if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT)) {
    // |x| < 2^-7
    if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 7)) {
      // |x| < 2^-27, |tan(x) - x| < ulp(x)/2.
      if (LIBC_UNLIKELY(x_e < FPBits::EXP_BIAS - 27)) {
        // Signed zeros.
        if (LIBC_UNLIKELY(x == 0.0))
          return x + x; // Make sure it works with FTZ/DAZ.

#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
        return fputil::multiply_add(x, 0x1.0p-54, x);
#else
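        // Without FMA, the product x * 2^-54 may be too small to make the sum
        // below round away from zero for |x| near the subnormal range, so
        // adjust the last bit directly when the rounding mode requires it.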
        if (LIBC_UNLIKELY(x_e < 4)) {
          int rounding_mode = fputil::quick_get_round();
          if ((xbits.sign() == Sign::POS && rounding_mode == FE_UPWARD) ||
              (xbits.sign() == Sign::NEG && rounding_mode == FE_DOWNWARD))
            return FPBits(xbits.uintval() + 1).get_val();
        }
        return fputil::multiply_add(x, 0x1.0p-54, x);
#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
      }
      // No range reduction needed.
      k = 0;
      y.lo = 0.0;
      y.hi = x;
    } else {
      // Small range reduction.
      k = range_reduction_small(x, y);
    }
  } else {
    // Inf or NaN
    if (LIBC_UNLIKELY(x_e > 2 * FPBits::EXP_BIAS)) {
      if (xbits.is_signaling_nan()) {
        fputil::raise_except_if_required(FE_INVALID);
        return FPBits::quiet_nan().get_val();
      }
      // tan(+-Inf) = NaN
      if (xbits.get_mantissa() == 0) {
        fputil::set_errno_if_required(EDOM);
        fputil::raise_except_if_required(FE_INVALID);
      }
      return x + FPBits::quiet_nan().get_val();
    }

    // Large range reduction.
    k = range_reduction_large.fast(x, y);
  }

  DoubleDouble tan_y;
  [[maybe_unused]] double err = tan_eval(y, tan_y);

  // Look up sin(k * pi/128) and cos(k * pi/128)
#ifdef LIBC_MATH_HAS_SMALL_TABLES
  // Memory-saving version: use a 65-entry table.
  auto get_idx_dd = [](unsigned kk) -> DoubleDouble {
    unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
    DoubleDouble ans = SIN_K_PI_OVER_128[idx];
    if (kk & 128) {
      ans.hi = -ans.hi;
      ans.lo = -ans.lo;
    }
    return ans;
  };
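  // sin((k + 128) * pi/128) = -sin(k * pi/128), and
  // sin((k + 64) * pi/128) = cos(k * pi/128).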
  DoubleDouble msin_k = get_idx_dd(k + 128);
  DoubleDouble cos_k = get_idx_dd(k + 64);
#else
  // Fast lookup version, but needs a 256-entry table.
  // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
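  // -sin(k * pi/128) = sin(k * pi/128 + pi) = sin((k + 128) * pi/128).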
  DoubleDouble msin_k = SIN_K_PI_OVER_128[(k + 128) & 255];
  DoubleDouble cos_k = SIN_K_PI_OVER_128[(k + 64) & 255];
#endif // LIBC_MATH_HAS_SMALL_TABLES

  // After range reduction, k = round(x * 128 / pi) and y = x - k * (pi / 128).
  // So k is an integer and -pi / 256 <= y <= pi / 256.
  // Then tan(x) = sin(x) / cos(x)
  //             = sin(k * pi/128 + y) / cos(k * pi/128 + y)
  //             = (cos(y) * sin(k*pi/128) + sin(y) * cos(k*pi/128)) /
  //               / (cos(y) * cos(k*pi/128) - sin(y) * sin(k*pi/128))
  //             = (sin(k*pi/128) + tan(y) * cos(k*pi/128)) /
  //               / (cos(k*pi/128) - tan(y) * sin(k*pi/128))
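  // msin_k holds -sin(k * pi/128), so the denominator
  //   cos(k*pi/128) - tan(y) * sin(k*pi/128)
  // is assembled below as the plain sum msin_k_tan_y + cos_k.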
  DoubleDouble cos_k_tan_y = fputil::quick_mult(tan_y, cos_k);
  DoubleDouble msin_k_tan_y = fputil::quick_mult(tan_y, msin_k);

  // num_dd = sin(k*pi/128) + tan(y) * cos(k*pi/128)
  DoubleDouble num_dd = fputil::exact_add<false>(cos_k_tan_y.hi, -msin_k.hi);
  // den_dd = cos(k*pi/128) - tan(y) * sin(k*pi/128)
  DoubleDouble den_dd = fputil::exact_add<false>(msin_k_tan_y.hi, cos_k.hi);
  num_dd.lo += cos_k_tan_y.lo - msin_k.lo;
  den_dd.lo += msin_k_tan_y.lo + cos_k.lo;

#ifdef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
  double tan_x = (num_dd.hi + num_dd.lo) / (den_dd.hi + den_dd.lo);
  return tan_x;
#else
  // Accurate test and pass for correctly rounded implementation.

  // Accurate double-double division
  DoubleDouble tan_x = fputil::div(num_dd, den_dd);

  // Simple error bound: |1 / den_dd| < 2^(1 + floor(-log2(|den_dd|))).
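  // den_inv holds the bit pattern of a power of 2 upper-bounding |1 / den_dd|,
  // obtained directly from the exponent field of den_dd.hi.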
  uint64_t den_inv = (static_cast<uint64_t>(FPBits::EXP_BIAS + 1)
                      << (FPBits::FRACTION_LEN + 1)) -
                     (FPBits(den_dd.hi).uintval() & FPBits::EXP_MASK);

  // For tan_x = (num_dd + err) / (den_dd + err), the error is bounded by:
  //   | tan_x - num_dd / den_dd | <= err * ( 1 + | tan_x * den_dd | ).
  double tan_err =
      err * fputil::multiply_add(FPBits(den_inv).get_val(),
                                 FPBits(tan_x.hi).abs().get_val(), 1.0);

  double err_higher = tan_x.lo + tan_err;
  double err_lower = tan_x.lo - tan_err;

  double tan_upper = tan_x.hi + err_higher;
  double tan_lower = tan_x.hi + err_lower;

  // Ziv's rounding test.
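  // If both error-adjusted candidates round to the same double, the fast-pass
  // result is correctly rounded; otherwise fall through to the accurate pass.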
  if (LIBC_LIKELY(tan_upper == tan_lower))
    return tan_upper;

  Float128 u_f128;
  if (LIBC_LIKELY(x_e < FPBits::EXP_BIAS + FAST_PASS_EXPONENT))
    u_f128 = range_reduction_small_f128(x);
  else
    u_f128 = range_reduction_large.accurate();

  Float128 tan_u = tan_eval(u_f128);

  auto get_sin_k = [](unsigned kk) -> Float128 {
    unsigned idx = (kk & 64) ? 64 - (kk & 63) : (kk & 63);
    Float128 ans = SIN_K_PI_OVER_128_F128[idx];
    if (kk & 128)
      ans.sign = Sign::NEG;
    return ans;
  };

  // cos(k * pi/128) = sin(k * pi/128 + pi/2) = sin((k + 64) * pi/128).
  Float128 sin_k_f128 = get_sin_k(k);
  Float128 cos_k_f128 = get_sin_k(k + 64);
  Float128 msin_k_f128 = get_sin_k(k + 128);

  // num_f128 = sin(k*pi/128) + tan(y) * cos(k*pi/128)
  Float128 num_f128 =
      fputil::quick_add(sin_k_f128, fputil::quick_mul(cos_k_f128, tan_u));
  // den_f128 = cos(k*pi/128) - tan(y) * sin(k*pi/128)
  Float128 den_f128 =
      fputil::quick_add(cos_k_f128, fputil::quick_mul(msin_k_f128, tan_u));

  // tan(x) = (sin(k*pi/128) + tan(y) * cos(k*pi/128)) /
  //          / (cos(k*pi/128) - tan(y) * sin(k*pi/128))
  // TODO: The initial seed 1.0/den_dd.hi for Newton-Raphson reciprocal can be
  // reused from DoubleDouble fputil::div in the fast pass.
  Float128 result = newton_raphson_div(num_f128, den_f128, 1.0 / den_dd.hi);

  // TODO: Add assertion if Ziv's accuracy tests fail in debug mode.
  // https://github.com/llvm/llvm-project/issues/96452.
  return static_cast<double>(result);

#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
}

} // namespace LIBC_NAMESPACE_DECL

source code of libc/src/math/generic/tan.cpp