| 1 | /* Double-precision AdvSIMD atan2 |
| 2 | |
| 3 | Copyright (C) 2023-2024 Free Software Foundation, Inc. |
| 4 | This file is part of the GNU C Library. |
| 5 | |
| 6 | The GNU C Library is free software; you can redistribute it and/or |
| 7 | modify it under the terms of the GNU Lesser General Public |
| 8 | License as published by the Free Software Foundation; either |
| 9 | version 2.1 of the License, or (at your option) any later version. |
| 10 | |
| 11 | The GNU C Library is distributed in the hope that it will be useful, |
| 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | Lesser General Public License for more details. |
| 15 | |
| 16 | You should have received a copy of the GNU Lesser General Public |
| 17 | License along with the GNU C Library; if not, see |
| 18 | <https://www.gnu.org/licenses/>. */ |
| 19 | |
| 20 | #include "v_math.h" |
| 21 | #include "poly_advsimd_f64.h" |
| 22 | |
static const struct data
{
  /* Even-index polynomial coefficients are stored as full vectors, while
     the odd-index ones are scalars laid out pairwise (c1,c3), (c5,c7), ...
     so that each pair can be loaded with one vld1q_f64 and consumed via
     vfmaq_laneq_f64 in the evaluation below, halving the table footprint.  */
  float64x2_t c0, c2, c4, c6, c8, c10, c12, c14, c16, c18;
  float64x2_t pi_over_2;
  double c1, c3, c5, c7, c9, c11, c13, c15, c17, c19;
  uint64x2_t zeroinfnan, minustwo;
} data = {
  /* Coefficients of polynomial P such that atan(x)~x+x*P(x^2) on
     [2**-1022, 1.0]. */
  .c0 = V2 (-0x1.5555555555555p-2),
  .c1 = 0x1.99999999996c1p-3,
  .c2 = V2 (-0x1.2492492478f88p-3),
  .c3 = 0x1.c71c71bc3951cp-4,
  .c4 = V2 (-0x1.745d160a7e368p-4),
  .c5 = 0x1.3b139b6a88ba1p-4,
  .c6 = V2 (-0x1.11100ee084227p-4),
  .c7 = 0x1.e1d0f9696f63bp-5,
  .c8 = V2 (-0x1.aebfe7b418581p-5),
  .c9 = 0x1.842dbe9b0d916p-5,
  .c10 = V2 (-0x1.5d30140ae5e99p-5),
  .c11 = 0x1.338e31eb2fbbcp-5,
  .c12 = V2 (-0x1.00e6eece7de8p-5),
  .c13 = 0x1.860897b29e5efp-6,
  .c14 = V2 (-0x1.0051381722a59p-6),
  .c15 = 0x1.14e9dc19a4a4ep-7,
  .c16 = V2 (-0x1.d0062b42fe3bfp-9),
  .c17 = 0x1.17739e210171ap-10,
  .c18 = V2 (-0x1.ab24da7be7402p-13),
  .c19 = 0x1.358851160a528p-16,
  .pi_over_2 = V2 (0x1.921fb54442d18p+0),
  /* 2 * asuint64 (INFINITY) - 1: threshold for the zeroinfnan test.  */
  .zeroinfnan = V2 (2 * 0x7ff0000000000000ul - 1),
  /* Bit pattern of -2.0, masked in when x < 0 to build the quadrant shift.  */
  .minustwo = V2 (0xc000000000000000),
};
| 56 | |
/* Mask selecting only the sign bit of an IEEE-754 binary64 lane.  */
#define SignMask v_u64 (0x8000000000000000)
| 58 | |
/* Special cases i.e. 0, infinity, NaN (fall back to scalar calls). */
static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t y, float64x2_t x, float64x2_t ret,
	      uint64x2_t sign_xy, uint64x2_t cmp)
{
  /* Account for the sign of x and y: the fast path computed atan2(|y|,|x|)
     (plus quadrant shift), so XOR in the combined sign here before the
     per-lane scalar fallback overwrites the lanes flagged in CMP.  */
  ret = vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));
  return v_call2_f64 (atan2, y, x, ret, cmp);
}
| 69 | |
| 70 | /* Returns 1 if input is the bit representation of 0, infinity or nan. */ |
| 71 | static inline uint64x2_t |
| 72 | zeroinfnan (uint64x2_t i, const struct data *d) |
| 73 | { |
| 74 | /* (2 * i - 1) >= (2 * asuint64 (INFINITY) - 1). */ |
| 75 | return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (x: 1)), d->zeroinfnan); |
| 76 | } |
| 77 | |
/* Fast implementation of vector atan2.
   Maximum observed error is 2.8 ulps:
   _ZGVnN2vv_atan2 (0x1.9651a429a859ap+5, 0x1.953075f4ee26p+5)
	got 0x1.92d628ab678ccp-1
       want 0x1.92d628ab678cfp-1. */
float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  uint64x2_t ix = vreinterpretq_u64_f64 (x);
  uint64x2_t iy = vreinterpretq_u64_f64 (y);

  /* Lanes where either input is 0, inf or nan are fixed up at the end.  */
  uint64x2_t special_cases
      = vorrq_u64 (zeroinfnan (ix, d), zeroinfnan (iy, d));

  /* The core works on |x|, |y|; the combined sign is XOR-ed back in at
     the end (atan2 is odd in y and its sign pattern factors this way).  */
  uint64x2_t sign_x = vandq_u64 (ix, SignMask);
  uint64x2_t sign_y = vandq_u64 (iy, SignMask);
  uint64x2_t sign_xy = veorq_u64 (sign_x, sign_y);

  float64x2_t ax = vabsq_f64 (x);
  float64x2_t ay = vabsq_f64 (y);

  uint64x2_t pred_xlt0 = vcltzq_f64 (x);
  /* vcagtq compares absolute values: |y| > |x|.  */
  uint64x2_t pred_aygtax = vcagtq_f64 (y, x);

  /* Set up z for call to atan: z = ay/ax when |y| <= |x|, otherwise
     z = -ax/ay, so |z| <= 1 and atan2 reduces to atan(z) + shift.  */
  float64x2_t n = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
  float64x2_t q = vbslq_f64 (pred_aygtax, ay, ax);
  float64x2_t z = vdivq_f64 (n, q);

  /* Work out the correct shift, a multiple of pi/2 selected by quadrant:
     start from -2 when x < 0 (else 0), add 1 when |y| > |x|, giving
     {0, 1, -2, -1} * pi/2 for the four combinations.  */
  float64x2_t shift
      = vreinterpretq_f64_u64 (vandq_u64 (pred_xlt0, d->minustwo));
  shift = vbslq_f64 (pred_aygtax, vaddq_f64 (shift, v_f64 (1.0)), shift);
  shift = vmulq_f64 (shift, d->pi_over_2);

  /* Calculate the polynomial approximation.
     Use split Estrin scheme for P(z^2) with deg(P)=19. Use split instead of
     full scheme to avoid underflow in x^16.
     The order 19 polynomial P approximates
     (atan(sqrt(x))-sqrt(x))/x^(3/2). */
  float64x2_t z2 = vmulq_f64 (z, z);
  float64x2_t x2 = vmulq_f64 (z2, z2);
  float64x2_t x4 = vmulq_f64 (x2, x2);
  float64x2_t x8 = vmulq_f64 (x4, x4);

  /* Each load brings in one scalar coefficient pair (c1,c3), (c5,c7), ...
     consumed lane-by-lane via vfmaq_laneq_f64 below.  */
  float64x2_t c13 = vld1q_f64 (&d->c1);
  float64x2_t c57 = vld1q_f64 (&d->c5);
  float64x2_t c911 = vld1q_f64 (&d->c9);
  float64x2_t c1315 = vld1q_f64 (&d->c13);
  float64x2_t c1719 = vld1q_f64 (&d->c17);

  /* estrin_7.  Low half: p07 = c0 + c1*z2 + ... + c7*z2^7.  */
  float64x2_t p01 = vfmaq_laneq_f64 (d->c0, z2, c13, 0);
  float64x2_t p23 = vfmaq_laneq_f64 (d->c2, z2, c13, 1);
  float64x2_t p03 = vfmaq_f64 (p01, x2, p23);

  float64x2_t p45 = vfmaq_laneq_f64 (d->c4, z2, c57, 0);
  float64x2_t p67 = vfmaq_laneq_f64 (d->c6, z2, c57, 1);
  float64x2_t p47 = vfmaq_f64 (p45, x2, p67);

  float64x2_t p07 = vfmaq_f64 (p03, x4, p47);

  /* estrin_11.  High half: p819 = c8 + c9*z2 + ... + c19*z2^11.  */
  float64x2_t p89 = vfmaq_laneq_f64 (d->c8, z2, c911, 0);
  float64x2_t p1011 = vfmaq_laneq_f64 (d->c10, z2, c911, 1);
  float64x2_t p811 = vfmaq_f64 (p89, x2, p1011);

  float64x2_t p1213 = vfmaq_laneq_f64 (d->c12, z2, c1315, 0);
  float64x2_t p1415 = vfmaq_laneq_f64 (d->c14, z2, c1315, 1);
  float64x2_t p1215 = vfmaq_f64 (p1213, x2, p1415);

  float64x2_t p1617 = vfmaq_laneq_f64 (d->c16, z2, c1719, 0);
  float64x2_t p1819 = vfmaq_laneq_f64 (d->c18, z2, c1719, 1);
  float64x2_t p1619 = vfmaq_f64 (p1617, x2, p1819);

  float64x2_t p815 = vfmaq_f64 (p811, x4, p1215);
  float64x2_t p819 = vfmaq_f64 (p815, x8, p1619);

  /* Combine halves: P(z2) = p07 + z2^8 * p819.  */
  float64x2_t ret = vfmaq_f64 (p07, p819, x8);

  /* Finalize. y = shift + z + z^3 * P(z^2). */
  ret = vfmaq_f64 (z, ret, vmulq_f64 (z2, z));
  ret = vaddq_f64 (ret, shift);

  if (__glibc_unlikely (v_any_u64 (special_cases)))
    return special_case (y, x, ret, sign_xy, special_cases);

  /* Account for the sign of x and y. */
  ret = vreinterpretq_f64_u64 (
      veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));

  return ret;
}
| 172 | |