/* Add two limb vectors of the same length > 0 and store sum in a third
   limb vector.
   Copyright (C) 1992-2024 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library; see the file COPYING.LIB.  If not,
   see <https://www.gnu.org/licenses/>.  */
| 19 | |
#include <sysdep.h>
#include "asm-syntax.h"

/* mp_limb_t __mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr,
			  mp_srcptr s2_ptr, mp_size_t size)

   Adds the SIZE 32-bit limbs at S1 to those at S2, stores the sums at
   RES, and returns the final carry (0 or 1) in %eax.  The loop is
   unrolled 8x and entered part-way through (computed jump) to handle
   SIZE % 8 leftover limbs on the first pass.

   Argument offsets from %esp, valid after the prologue below has
   pushed two registers (4 bytes return address + 8 bytes saves).  */
#define PARMS	4+8		/* space for 2 saved regs */
#define RES	PARMS
#define S1	RES+4
#define S2	S1+4
#define SIZE	S2+4

	.text
#ifdef PIC
/* PIC thunk: entered by `call L(1)' with %eax = (loop entry) - L(0).
   The call pushes the address of L(0); adding the word at (%esp)
   turns %eax into the absolute address of the loop entry point.  */
L(1):	addl    (%esp), %eax
	ret
#endif

ENTRY (__mpn_add_n)

	/* Save the two callee-saved registers we use for pointers.  */
	pushl %edi
	cfi_adjust_cfa_offset (4)
	pushl %esi
	cfi_adjust_cfa_offset (4)

	movl	RES(%esp),%edi		/* %edi = res_ptr */
	cfi_rel_offset (edi, 4)
	movl	S1(%esp),%esi		/* %esi = s1_ptr */
	cfi_rel_offset (esi, 0)
	movl	S2(%esp),%edx		/* %edx = s2_ptr */
	movl	SIZE(%esp),%ecx		/* %ecx = size */
	movl	%ecx,%eax
	shrl	$3,%ecx			/* compute count for unrolled loop */
	negl	%eax
	andl	$7,%eax			/* get index where to start loop */
	/* %eax = (-size) & 7 = number of unrolled steps to skip on the
	   first pass.  NOTE: andl cleared CF, so the first adcl below
	   adds with no carry-in; every instruction from here to the
	   loop entry (incl/shll/subl/shrl/leal/jmp) leaves CF = 0.  */
	jz	L(oop)			/* necessary special case for 0 */
	incl	%ecx			/* adjust loop count */
	shll	$2,%eax			/* adjustment for pointers... */
	subl	%eax,%edi		/* ... since they are offset ... */
	subl	%eax,%esi		/* ... by a constant when we ... */
	subl	%eax,%edx		/* ... enter the loop */
	shrl	$2,%eax			/* restore previous value */
	/* Loop-entry address: the first unrolled step encodes to 6
	   bytes (no displacement) and each later step to 9 bytes
	   (disp8), so skipping %eax >= 1 steps means entering at
	   L(oop) + 9*%eax - 3.  The 9*%eax comes from (%eax,%eax,8).  */
#ifdef PIC
/* Calculate start address in loop for PIC.  */
	leal	(L(oop)-L(0)-3)(%eax,%eax,8),%eax
	call	L(1)
L(0):
#else
/* Calculate start address in loop for non-PIC.  */
 	leal	(L(oop) - 3)(%eax,%eax,8),%eax
#endif
	jmp	*%eax			/* jump into loop */
	ALIGN (3)
	/* 8x unrolled add loop.  Invariant: CF holds the carry between
	   limbs; leal and decl preserve CF, so it survives both the
	   pointer updates and the loop-count test.  */
L(oop):	movl	(%esi),%eax
	adcl	(%edx),%eax
	movl	%eax,(%edi)
	movl	4(%esi),%eax
	adcl	4(%edx),%eax
	movl	%eax,4(%edi)
	movl	8(%esi),%eax
	adcl	8(%edx),%eax
	movl	%eax,8(%edi)
	movl	12(%esi),%eax
	adcl	12(%edx),%eax
	movl	%eax,12(%edi)
	movl	16(%esi),%eax
	adcl	16(%edx),%eax
	movl	%eax,16(%edi)
	movl	20(%esi),%eax
	adcl	20(%edx),%eax
	movl	%eax,20(%edi)
	movl	24(%esi),%eax
	adcl	24(%edx),%eax
	movl	%eax,24(%edi)
	movl	28(%esi),%eax
	adcl	28(%edx),%eax
	movl	%eax,28(%edi)
	leal	32(%edi),%edi		/* advance pointers; preserves CF */
	leal	32(%esi),%esi
	leal	32(%edx),%edx
	decl	%ecx			/* decl preserves CF */
	jnz	L(oop)

	sbbl	%eax,%eax		/* %eax = -CF */
	negl	%eax			/* %eax = carry-out (0 or 1) */

	popl %esi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (esi)
	popl %edi
	cfi_adjust_cfa_offset (-4)
	cfi_restore (edi)

	ret
END (__mpn_add_n)
| 111 | |