| 1 | /* Double-precision AdvSIMD atan2 |
| 2 | |
| 3 | Copyright (C) 2023-2024 Free Software Foundation, Inc. |
| 4 | This file is part of the GNU C Library. |
| 5 | |
| 6 | The GNU C Library is free software; you can redistribute it and/or |
| 7 | modify it under the terms of the GNU Lesser General Public |
| 8 | License as published by the Free Software Foundation; either |
| 9 | version 2.1 of the License, or (at your option) any later version. |
| 10 | |
| 11 | The GNU C Library is distributed in the hope that it will be useful, |
| 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | Lesser General Public License for more details. |
| 15 | |
| 16 | You should have received a copy of the GNU Lesser General Public |
| 17 | License along with the GNU C Library; if not, see |
| 18 | <https://www.gnu.org/licenses/>. */ |
| 19 | |
| 20 | #include "v_math.h" |
| 21 | |
static const struct data
{
  /* Odd-index coefficients are stored as adjacent scalar doubles so that
     each pair (c1,c3), (c5,c7), ... can be fetched with a single
     vld1q_f64 and consumed lane-wise by vfmaq_laneq_f64 in the
     polynomial evaluation.  Do not reorder these fields.  */
  double c1, c3, c5, c7, c9, c11, c13, c15, c17, c19;
  /* Even-index coefficients are kept as full vectors: they are the
     addend operands of the fused multiply-adds.  */
  float64x2_t c0, c2, c4, c6, c8, c10, c12, c14, c16, c18;
  float64x2_t pi_over_2;
  uint64x2_t zeroinfnan;	/* 2 * asuint64 (INFINITY) - 1.  */
} data = {
  /* Coefficients of polynomial P such that
     atan(x)~x+x*P(x^2) on [2^-1022, 1.0]. */
  .c0 = V2 (-0x1.555555555552ap-2),
  .c1 = 0x1.9999999995aebp-3,
  .c2 = V2 (-0x1.24924923923f6p-3),
  .c3 = 0x1.c71c7184288a2p-4,
  .c4 = V2 (-0x1.745d11fb3d32bp-4),
  .c5 = 0x1.3b136a18051b9p-4,
  .c6 = V2 (-0x1.110e6d985f496p-4),
  .c7 = 0x1.e1bcf7f08801dp-5,
  .c8 = V2 (-0x1.ae644e28058c3p-5),
  .c9 = 0x1.82eeb1fed85c6p-5,
  .c10 = V2 (-0x1.59d7f901566cbp-5),
  .c11 = 0x1.2c982855ab069p-5,
  .c12 = V2 (-0x1.eb49592998177p-6),
  .c13 = 0x1.69d8b396e3d38p-6,
  .c14 = V2 (-0x1.ca980345c4204p-7),
  .c15 = 0x1.dc050eafde0b3p-8,
  .c16 = V2 (-0x1.7ea70755b8eccp-9),
  .c17 = 0x1.ba3da3de903e8p-11,
  .c18 = V2 (-0x1.44a4b059b6f67p-13),
  .c19 = 0x1.c4a45029e5a91p-17,
  .pi_over_2 = V2 (0x1.921fb54442d18p+0),
  .zeroinfnan = V2 (2 * 0x7ff0000000000000ul - 1),
};
| 54 | |
| 55 | #define SignMask v_u64 (0x8000000000000000) |
| 56 | |
| 57 | /* Special cases i.e. 0, infinity, NaN (fall back to scalar calls). */ |
| 58 | static float64x2_t VPCS_ATTR NOINLINE |
| 59 | special_case (float64x2_t y, float64x2_t x, float64x2_t ret, |
| 60 | uint64x2_t sign_xy, uint64x2_t cmp) |
| 61 | { |
| 62 | /* Account for the sign of x and y. */ |
| 63 | ret = vreinterpretq_f64_u64 ( |
| 64 | veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy)); |
| 65 | return v_call2_f64 (atan2, y, x, ret, cmp); |
| 66 | } |
| 67 | |
| 68 | /* Returns 1 if input is the bit representation of 0, infinity or nan. */ |
| 69 | static inline uint64x2_t |
| 70 | zeroinfnan (uint64x2_t i, const struct data *d) |
| 71 | { |
| 72 | /* (2 * i - 1) >= (2 * asuint64 (INFINITY) - 1). */ |
| 73 | return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (x: 1)), d->zeroinfnan); |
| 74 | } |
| 75 | |
/* Fast implementation of vector atan2.
   Maximum observed error is 1.97 ulps:
   _ZGVnN2vv_atan2 (0x1.42337dba73768p+5, 0x1.422d748cd3e29p+5)
   got 0x1.9224810264efcp-1 want 0x1.9224810264efep-1. */
float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  uint64x2_t ix = vreinterpretq_u64_f64 (x);
  uint64x2_t iy = vreinterpretq_u64_f64 (y);

  /* Lanes where either input is 0, inf or NaN fall back to the scalar
     atan2 at the end.  */
  uint64x2_t special_cases
      = vorrq_u64 (zeroinfnan (ix, d), zeroinfnan (iy, d));

  /* sign_xy is the sign of y/x; the core runs on |x|, |y| and the sign is
     restored by XOR just before returning.  */
  uint64x2_t sign_x = vandq_u64 (ix, SignMask);
  uint64x2_t sign_y = vandq_u64 (iy, SignMask);
  uint64x2_t sign_xy = veorq_u64 (sign_x, sign_y);

  float64x2_t ax = vabsq_f64 (x);
  float64x2_t ay = vabsq_f64 (y);

  uint64x2_t pred_xlt0 = vcltzq_f64 (x);
  /* vcagtq compares absolute values: pred_aygtax is |y| > |x|.  */
  uint64x2_t pred_aygtax = vcagtq_f64 (y, x);

  /* Set up z for evaluation of atan: pick -ax/ay or ay/ax so that the
     polynomial argument always has magnitude <= 1.  */
  float64x2_t num = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
  float64x2_t den = vbslq_f64 (pred_aygtax, ay, ax);
  float64x2_t z = vdivq_f64 (num, den);

  /* Work out the correct shift for atan2.  shift holds a multiple of
     pi/2; the multiplication by pi/2 is done in the finalize step:
     -pi   when x < 0  and ax > ay
     -pi/2 when x < 0  and ax < ay
      0    when x >= 0 and ax > ay
      pi/2 when x >= 0 and ax < ay.
     Built branchlessly: pred_xlt0 contributes -2, pred_aygtax +1,
     by masking the bit patterns of -2.0 and 1.0.  */
  float64x2_t shift = vreinterpretq_f64_u64 (
      vandq_u64 (pred_xlt0, vreinterpretq_u64_f64 (v_f64 (-2.0))));
  float64x2_t shift2 = vreinterpretq_f64_u64 (
      vandq_u64 (pred_aygtax, vreinterpretq_u64_f64 (v_f64 (1.0))));
  shift = vaddq_f64 (shift, shift2);

  /* Calculate the polynomial approximation.
     z2..z16 are the even powers of z used by the Estrin scheme.  */
  float64x2_t z2 = vmulq_f64 (z, z);
  float64x2_t z3 = vmulq_f64 (z2, z);
  float64x2_t z4 = vmulq_f64 (z2, z2);
  float64x2_t z8 = vmulq_f64 (z4, z4);
  float64x2_t z16 = vmulq_f64 (z8, z8);

  /* Each vld1q_f64 loads two adjacent scalar coefficients (e.g. c1 and c3)
     into one vector, consumed lane-wise by vfmaq_laneq_f64 below.  */
  float64x2_t c13 = vld1q_f64 (&d->c1);
  float64x2_t c57 = vld1q_f64 (&d->c5);
  float64x2_t c911 = vld1q_f64 (&d->c9);
  float64x2_t c1315 = vld1q_f64 (&d->c13);
  float64x2_t c1719 = vld1q_f64 (&d->c17);

  /* Order-7 Estrin. */
  float64x2_t p01 = vfmaq_laneq_f64 (d->c0, z2, c13, 0);
  float64x2_t p23 = vfmaq_laneq_f64 (d->c2, z2, c13, 1);
  float64x2_t p03 = vfmaq_f64 (p01, z4, p23);

  float64x2_t p45 = vfmaq_laneq_f64 (d->c4, z2, c57, 0);
  float64x2_t p67 = vfmaq_laneq_f64 (d->c6, z2, c57, 1);
  float64x2_t p47 = vfmaq_f64 (p45, z4, p67);

  float64x2_t p07 = vfmaq_f64 (p03, z8, p47);

  /* Order-11 Estrin. */
  float64x2_t p89 = vfmaq_laneq_f64 (d->c8, z2, c911, 0);
  float64x2_t p1011 = vfmaq_laneq_f64 (d->c10, z2, c911, 1);
  float64x2_t p811 = vfmaq_f64 (p89, z4, p1011);

  float64x2_t p1213 = vfmaq_laneq_f64 (d->c12, z2, c1315, 0);
  float64x2_t p1415 = vfmaq_laneq_f64 (d->c14, z2, c1315, 1);
  float64x2_t p1215 = vfmaq_f64 (p1213, z4, p1415);

  float64x2_t p1617 = vfmaq_laneq_f64 (d->c16, z2, c1719, 0);
  float64x2_t p1819 = vfmaq_laneq_f64 (d->c18, z2, c1719, 1);
  float64x2_t p1619 = vfmaq_f64 (p1617, z4, p1819);

  float64x2_t p815 = vfmaq_f64 (p811, z8, p1215);
  float64x2_t p819 = vfmaq_f64 (p815, z16, p1619);

  /* Combine the two halves: p819 starts at c8, i.e. offset 8 in powers of
     z^2, hence the z16 factor.  */
  float64x2_t poly = vfmaq_f64 (p07, p819, z16);

  /* Finalize. y = shift + z + z^3 * P(z^2). */
  float64x2_t ret = vfmaq_f64 (z, shift, d->pi_over_2);
  ret = vfmaq_f64 (ret, z3, poly);

  if (__glibc_unlikely (v_any_u64 (special_cases)))
    return special_case (y, x, ret, sign_xy, special_cases);

  /* Account for the sign of x and y. */
  return vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));
}
| 170 | |