/* Function sin vectorized with AVX2.
   Copyright (C) 2014-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include "svml_d_trig_data.h"

        .section .text.avx2, "ax", @progbits
ENTRY (_ZGVdN4v_sin_avx2)
/* ALGORITHM DESCRIPTION:

   (low accuracy (< 4 ulp) or enhanced performance
   (half of correct mantissa) implementation)

   Argument representation:
   arg = N*Pi + R

   Result calculation:
   sin(arg) = sin(N*Pi + R) = (-1)^N * sin(R)
   sin(R) is approximated by the corresponding polynomial.  */
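/* Illustrative C model of the algorithm above (not part of the build;
   the real kernel reads InvPI, the split Pi, and the C1..C7
   coefficients from the __svml_d_trig_data table, while the constants
   here are textbook placeholders):

     #include <math.h>

     static double
     sin_model (double x)
     {
       double ax = fabs (x);               // X' = |X|
       double n = nearbyint (ax / M_PI);   // arg = N*Pi + R
       double r = ax - n * M_PI;           // reduced argument R
       double r2 = r * r;
       // Truncated odd polynomial for sin(R); the kernel uses seven
       // tuned coefficients instead.
       double poly = r + r * r2 * (-1.0 / 6.0 + r2 / 120.0);
       double res = fmod (n, 2.0) != 0.0 ? -poly : poly;   // (-1)^N
       return x < 0.0 ? -res : res;        // sin is odd
     }
*/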
        pushq   %rbp
        cfi_adjust_cfa_offset (8)
        cfi_rel_offset (%rbp, 0)
        movq    %rsp, %rbp
        cfi_def_cfa_register (%rbp)
        andq    $-64, %rsp
        subq    $448, %rsp
        movq    __svml_d_trig_data@GOTPCREL(%rip), %rax
        vmovdqa %ymm0, %ymm4
        vmovupd __dAbsMask(%rax), %ymm2
        vmovupd __dInvPI(%rax), %ymm6
        vmovupd __dRShifter(%rax), %ymm5
        vmovupd __dPI1_FMA(%rax), %ymm7

/* ARGUMENT RANGE REDUCTION:
   X' = |X| */
        vandpd  %ymm2, %ymm4, %ymm3

/* Y = X'*InvPi + RS : right shifter add */
        vfmadd213pd %ymm5, %ymm3, %ymm6

/* N = Y - RS : right shifter sub */
        vsubpd  %ymm5, %ymm6, %ymm1
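/* The right-shifter trick, sketched in C: adding a large constant RS
   (assumed here to be 0x1.8p52, i.e. 1.5*2^52, the usual __dRShifter
   value) pushes the integer part of X'/Pi into the low mantissa bits,
   so round-to-nearest performs the rounding; subtracting RS back
   yields N as an exact double:

     const double rs = 0x1.8p52;
     double y = ax * inv_pi + rs;   // Y = X'*InvPi + RS
     double n = y - rs;             // N = round (X'/Pi)
*/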

/* SignRes = Y<<63 : shift LSB to MSB place for result sign */
        vpsllq  $63, %ymm6, %ymm5
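/* Sketch of the sign trick: after the right-shifter add, the LSB of
   Y's mantissa holds the parity of N, so shifting it up to bit 63
   yields an IEEE-754 sign mask; XORing that mask into R below
   implements the (-1)^N factor.  In C (memcpy as a bit cast):

     uint64_t ybits, sign_res;
     memcpy (&ybits, &y, sizeof ybits);
     sign_res = ybits << 63;        // parity of N -> sign bit
*/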

/* R = X' - N*Pi1 */
        vmovapd %ymm3, %ymm0
        vfnmadd231pd %ymm1, %ymm7, %ymm0
        vcmpnle_uqpd __dRangeVal(%rax), %ymm3, %ymm3

/* R = R - N*Pi2 */
        vfnmadd231pd __dPI2_FMA(%rax), %ymm1, %ymm0

/* R = R - N*Pi3 */
        vfnmadd132pd __dPI3_FMA(%rax), %ymm0, %ymm1
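/* The three steps above are a Cody-Waite style reduction: Pi is split
   as PI1 + PI2 + PI3 with each part short enough that the FMA-based
   subtractions cancel exactly, preserving the low bits of R even when
   N is large.  A C sketch with placeholder names for the table
   entries:

     double r = fma (-n, PI1, ax);   // R = X' - N*Pi1
     r = fma (-n, PI2, r);           // R = R - N*Pi2
     r = fma (-n, PI3, r);           // R = R - N*Pi3
*/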

/* POLYNOMIAL APPROXIMATION:
   R2 = R*R */
        vmulpd  %ymm1, %ymm1, %ymm0

/* R = R^SignRes : update sign of reduced argument */
        vxorpd  %ymm5, %ymm1, %ymm6
        vmovupd __dC7_sin(%rax), %ymm1
        vfmadd213pd __dC6_sin(%rax), %ymm0, %ymm1
        vfmadd213pd __dC5_sin(%rax), %ymm0, %ymm1
        vfmadd213pd __dC4_sin(%rax), %ymm0, %ymm1

/* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
        vfmadd213pd __dC3_sin(%rax), %ymm0, %ymm1

/* Poly = R2*(C1+R2*(C2+R2*Poly)) */
        vfmadd213pd __dC2_sin(%rax), %ymm0, %ymm1
        vfmadd213pd __dC1_sin(%rax), %ymm0, %ymm1
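/* The FMA chain above is Horner's scheme in R2.  A C equivalent, with
   hypothetical names C1..C7 for the table coefficients:

     double poly = C7;
     poly = fma (poly, r2, C6);
     poly = fma (poly, r2, C5);
     poly = fma (poly, r2, C4);
     poly = fma (poly, r2, C3);
     poly = fma (poly, r2, C2);
     poly = fma (poly, r2, C1);   // C1+R2*(C2+R2*(...+R2*C7))
*/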

/* SignX - sign bit of X */
        vandnpd %ymm4, %ymm2, %ymm7
        vmulpd  %ymm0, %ymm1, %ymm2

/* Poly = Poly*R + R */
        vfmadd213pd %ymm6, %ymm6, %ymm2
        vmovmskpd %ymm3, %ecx

/* RECONSTRUCTION:
   Final sign setting: Res = Poly^SignX */
        vxorpd  %ymm7, %ymm2, %ymm0
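/* Reconstruction in C terms: Poly*R + R = R*(1 + R2*Poly) completes
   sin(R), and XORing in the sign bit of the original X exploits
   sin(-x) = -sin(x) to undo the initial |X|:

     double res = fma (poly * r2, r_signed, r_signed);
     uint64_t rbits, xbits;
     memcpy (&rbits, &res, sizeof rbits);
     memcpy (&xbits, &x, sizeof xbits);
     rbits ^= xbits & 0x8000000000000000ULL;   // Res = Poly^SignX
     memcpy (&res, &rbits, sizeof res);
*/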
        testl   %ecx, %ecx
        jne     .LBL_1_3

.LBL_1_2:
        cfi_remember_state
        movq    %rbp, %rsp
        cfi_def_cfa_register (%rsp)
        popq    %rbp
        cfi_adjust_cfa_offset (-8)
        cfi_restore (%rbp)
        ret

.LBL_1_3:
        cfi_restore_state
        vmovupd %ymm4, 320(%rsp)
        vmovupd %ymm0, 384(%rsp)
        je      .LBL_1_2

/* Save the live registers, then walk the lanes flagged in %ecx.  */
        xorb    %dl, %dl
        xorl    %eax, %eax
        vmovups %ymm8, 224(%rsp)
        vmovups %ymm9, 192(%rsp)
        vmovups %ymm10, 160(%rsp)
        vmovups %ymm11, 128(%rsp)
        vmovups %ymm12, 96(%rsp)
        vmovups %ymm13, 64(%rsp)
        vmovups %ymm14, 32(%rsp)
        vmovups %ymm15, (%rsp)
        movq    %rsi, 264(%rsp)
        movq    %rdi, 256(%rsp)
        movq    %r12, 296(%rsp)
        cfi_offset_rel_rsp (12, 296)
        movb    %dl, %r12b
        movq    %r13, 288(%rsp)
        cfi_offset_rel_rsp (13, 288)
        movl    %ecx, %r13d
        movq    %r14, 280(%rsp)
        cfi_offset_rel_rsp (14, 280)
        movl    %eax, %r14d
        movq    %r15, 272(%rsp)
        cfi_offset_rel_rsp (15, 272)
        cfi_remember_state
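/* Special-case path: %ecx holds the 4-bit lane mask from vmovmskpd;
   each set bit marks an input that exceeded __dRangeVal and is
   recomputed with the scalar sin, then patched into the saved result
   vector.  Roughly, in C (the loop below actually walks bit pairs,
   but only the low four mask bits can ever be set):

     for (int i = 0; i < 4; i++)
       if (mask & (1 << i))
         result[i] = sin (input[i]);   // scalar libm fallback
*/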

.LBL_1_6:
        btl     %r14d, %r13d
        jc      .LBL_1_12

.LBL_1_7:
        lea     1(%r14), %esi
        btl     %esi, %r13d
        jc      .LBL_1_10

.LBL_1_8:
        incb    %r12b
        addl    $2, %r14d
        cmpb    $16, %r12b
        jb      .LBL_1_6

        vmovups 224(%rsp), %ymm8
        vmovups 192(%rsp), %ymm9
        vmovups 160(%rsp), %ymm10
        vmovups 128(%rsp), %ymm11
        vmovups 96(%rsp), %ymm12
        vmovups 64(%rsp), %ymm13
        vmovups 32(%rsp), %ymm14
        vmovups (%rsp), %ymm15
        vmovupd 384(%rsp), %ymm0
        movq    264(%rsp), %rsi
        movq    256(%rsp), %rdi
        movq    296(%rsp), %r12
        cfi_restore (%r12)
        movq    288(%rsp), %r13
        cfi_restore (%r13)
        movq    280(%rsp), %r14
        cfi_restore (%r14)
        movq    272(%rsp), %r15
        cfi_restore (%r15)
        jmp     .LBL_1_2

.LBL_1_10:
        cfi_restore_state
/* Odd lane of the current pair: recompute with scalar sin.  */
        movzbl  %r12b, %r15d
        shlq    $4, %r15
        vmovsd  328(%rsp,%r15), %xmm0
        vzeroupper

        call    JUMPTARGET(sin)

        vmovsd  %xmm0, 392(%rsp,%r15)
        jmp     .LBL_1_8

.LBL_1_12:
/* Even lane of the current pair: recompute with scalar sin.  */
        movzbl  %r12b, %r15d
        shlq    $4, %r15
        vmovsd  320(%rsp,%r15), %xmm0
        vzeroupper

        call    JUMPTARGET(sin)

        vmovsd  %xmm0, 384(%rsp,%r15)
        jmp     .LBL_1_7

END (_ZGVdN4v_sin_avx2)