1/* Double-precision vector (Advanced SIMD) log function.
2
3 Copyright (C) 2023-2024 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
10
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, see
18 <https://www.gnu.org/licenses/>. */
19
20#include "v_math.h"
21
/* Constants for the vector log path, gathered in one struct so a single
   pointer (obtained via ptr_barrier) reaches all of them.  */
static const struct data
{
  uint64x2_t min_norm;	    /* asuint64 of the smallest positive normal.  */
  uint32x4_t special_bound; /* Top-word bound for the special-case test.  */
  float64x2_t poly[5];	    /* log1p polynomial coefficients (deg 2..6).  */
  float64x2_t ln2;	    /* ln(2), multiplied by the exponent k.  */
  uint64x2_t sign_exp_mask; /* Mask selecting sign and exponent bits.  */
} data = {
  /* Worst-case error: 1.17 + 0.5 ulp.
     Rel error: 0x1.6272e588p-56 in [ -0x1.fc1p-9 0x1.009p-8 ].  */
  .poly = { V2 (-0x1.ffffffffffff7p-2), V2 (0x1.55555555170d4p-2),
	    V2 (-0x1.0000000399c27p-2), V2 (0x1.999b2e90e94cap-3),
	    V2 (-0x1.554e550bd501ep-3) },
  .ln2 = V2 (0x1.62e42fefa39efp-1),
  .min_norm = V2 (0x0010000000000000),
  .special_bound = V4 (0x7fe00000), /* asuint64(inf) - min_norm.  */
  .sign_exp_mask = V2 (0xfff0000000000000)
};
40
/* Shorthand for the polynomial coefficients loaded from DATA.  */
#define A(i) d->poly[i]
/* Number of entries in the shared log lookup table; a power of two so
   reduction to an index is a mask (see lookup below).  */
#define N (1 << V_LOG_TABLE_BITS)
#define IndexMask (N - 1)
/* Subtracting Off from asuint64(x) places the reduced significand z in
   [Off, 2*Off) ~ [sqrt(2)/2, sqrt(2)).  */
#define Off v_u64 (0x3fe6900900000000)
45
/* Per-lane table values: invc approximates 1/c and logc = log(c), where
   c is near the centre of the subinterval containing z.  */
struct entry
{
  float64x2_t invc;
  float64x2_t logc;
};
51
52static inline struct entry
53lookup (uint64x2_t i)
54{
55 /* Since N is a power of 2, n % N = n & (N - 1). */
56 struct entry e;
57 uint64_t i0 = (i[0] >> (52 - V_LOG_TABLE_BITS)) & IndexMask;
58 uint64_t i1 = (i[1] >> (52 - V_LOG_TABLE_BITS)) & IndexMask;
59 float64x2_t e0 = vld1q_f64 (&__v_log_data.table[i0].invc);
60 float64x2_t e1 = vld1q_f64 (&__v_log_data.table[i1].invc);
61#if __BYTE_ORDER == __LITTLE_ENDIAN
62 e.invc = vuzp1q_f64 (e0, e1);
63 e.logc = vuzp2q_f64 (e0, e1);
64#else
65 e.invc = vuzp1q_f64 (e1, e0);
66 e.logc = vuzp2q_f64 (e1, e0);
67#endif
68 return e;
69}
70
/* Fallback for special inputs (zero, negative, subnormal, inf, NaN):
   finish the fast-path result hi + y*r2, then let v_call_f64 recompute
   the lanes flagged in CMP with the scalar log.  CMP is the narrowed
   32-bit per-lane mask; vmovl_u32 widens it back to 64-bit lanes.  */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, float64x2_t hi, float64x2_t r2,
	      uint32x2_t cmp)
{
  return v_call_f64 (log, x, vfmaq_f64 (hi, y, r2), vmovl_u32 (cmp));
}
77
78float64x2_t VPCS_ATTR V_NAME_D1 (log) (float64x2_t x)
79{
80 const struct data *d = ptr_barrier (&data);
81 float64x2_t z, r, r2, p, y, kd, hi;
82 uint64x2_t ix, iz, tmp;
83 uint32x2_t cmp;
84 int64x2_t k;
85 struct entry e;
86
87 ix = vreinterpretq_u64_f64 (x);
88 cmp = vcge_u32 (vsubhn_u64 (ix, d->min_norm),
89 vget_low_u32 (d->special_bound));
90
91 /* x = 2^k z; where z is in range [Off,2*Off) and exact.
92 The range is split into N subintervals.
93 The ith subinterval contains z and c is near its center. */
94 tmp = vsubq_u64 (ix, Off);
95 k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift. */
96 iz = vsubq_u64 (ix, vandq_u64 (tmp, d->sign_exp_mask));
97 z = vreinterpretq_f64_u64 (iz);
98 e = lookup (tmp);
99
100 /* log(x) = log1p(z/c-1) + log(c) + k*Ln2. */
101 r = vfmaq_f64 (v_f64 (-1.0), z, e.invc);
102 kd = vcvtq_f64_s64 (k);
103
104 /* hi = r + log(c) + k*Ln2. */
105 hi = vfmaq_f64 (vaddq_f64 (e.logc, r), kd, d->ln2);
106 /* y = r2*(A0 + r*A1 + r2*(A2 + r*A3 + r2*A4)) + hi. */
107 r2 = vmulq_f64 (r, r);
108 y = vfmaq_f64 (A (2), A (3), r);
109 p = vfmaq_f64 (A (0), A (1), r);
110 y = vfmaq_f64 (y, A (4), r2);
111 y = vfmaq_f64 (p, y, r2);
112
113 if (__glibc_unlikely (v_any_u32h (cmp)))
114 return special_case (x, y, hi, r2, cmp);
115 return vfmaq_f64 (hi, y, r2);
116}
117

source code of glibc/sysdeps/aarch64/fpu/log_advsimd.c