//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
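
// These lists feed the GNU as `.irp` loops used below: `.irp i,FROM_0_TO_31`
// repeats the lines up to the matching `.endr` once per value in the list,
// substituting each value for \i.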

#if defined(_AIX)
  .toc
#else
  .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +

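# The offsets used below assume the Registers_x86 layout: eax, ebx, ecx,
# edx, edi, esi, ebp and esp stored as 4-byte words starting at offset 0,
# followed by ss, eflags, and eip at offset 40.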
  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8, %edx
  movl  %edx, 28(%eax)
  movl   0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # the return address and eax now sit where the new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop   %eax  # eax was already pushed on new stack
  pop   %ecx
  jmp   *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__) && !defined(__arm64ec__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, the thread_state pointer is in rcx; move it into rdi
# to share the restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on Unix, even though the Win64 ABI
# normally requires some of them to be preserved.
  movq  %rcx, %rdi
#else
# On entry, the thread_state pointer is in rdi.
#endif

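# The offsets used below assume the Registers_x86_64 layout: rax, rbx,
# rcx, rdx, rdi, rsi, rbp and rsp stored as 8-byte slots starting at
# offset 0, r8..r15 at offsets 64..120, rip at 128, and (on Win64)
# xmm0..xmm15 starting at offset 176.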
  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
#endif
  movq  56(%rdi), %rsp # cut back rsp to new location
  pop   %rdi           # rdi was saved here earlier
  pop   %rcx
  jmpq  *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld n, (8 * (n + 2))(3)
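
// GPR n is assumed to live at byte offset 8 * (n + 2) in the saved
// context: the first two doublewords hold srr0 (the resume address,
// PPC64_OFFS_SRR0 == 0) and srr1, so r0 starts at byte 16. For example,
// PPC64_LR(6) expands to `ld 6, 64(3)`.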

// restore integral registers
// skip r0 for now
// skip r1 for now
  PPC64_LR(2)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi 4, 3, PPC64_OFFS_FP

// load VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the
// register in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer
// required, this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n)  \
  lxvd2x  n, 0, 4    ;\
  xxswapd n, n       ;\
  addi    4, 4, 16
#else
#define PPC64_LVS(n)  \
  lxvd2x  n, 0, 4    ;\
  addi    4, 4, 16
#endif
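
// For example, on a little-endian target PPC64_LVS(0) expands to
//   lxvd2x 0, 0, 4 ; xxswapd 0, 0 ; addi 4, 4, 16
// lxvd2x always loads the two doublewords in big-endian element order,
// so the xxswapd puts them back in the order the context stored them.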

// restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n)           \
  addi    4, 3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x  n, 0, 4                      ;\
  xxswapd n, n
#else
#define PPC64_CLVS_RESTORE(n)           \
  addi    4, 3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x  n, 0, 4
#endif

#if !defined(_AIX)
// Use VRSAVE to conditionally restore the remaining VS regs, which are
// where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVSl(n)                   \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\
  beq    Ldone##n                       ;\
  PPC64_CLVS_RESTORE(n)                 ;\
Ldone##n:

#define PPC64_CLVSh(n)                   \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n)) ;\
  beq    Ldone##n                       ;\
  PPC64_CLVS_RESTORE(n)                 ;\
Ldone##n:

#else

#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)

#endif // !defined(_AIX)
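
// VRSAVE is assumed to assign its most-significant bit to v0, so the
// in-use bit for VS register n (n = 32..63, overlapping v0..v31) is bit
// n - 32 counting from the MSB. For n in 32..47 that bit falls in the
// upper halfword and is tested with `andis.` using mask 1 << (47 - n);
// for n in 48..63 it falls in the lower halfword, tested with `andi.`
// using mask 1 << (63 - n). E.g. PPC64_CLVSl(33) tests 0x4000 of the
// upper halfword, the VRSAVE bit for v1.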

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register; FPRs share the 16-byte-per-entry VS layout, hence
// the stride of 16 bytes
#define PPC64_LF(n) \
  lfd n, (PPC64_OFFS_FP + n * 16)(3)

// restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)   \
  ld  0, (PPC64_OFFS_V + n * 16)(3)     ;\
  std 0, 0(4)                           ;\
  ld  0, (PPC64_OFFS_V + n * 16 + 8)(3) ;\
  std 0, 8(4)                           ;\
  lvx n, 0, 4

#if !defined(_AIX)
// restore vector registers if any are in use. In the AIX ABI, VRSAVE is
// not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define PPC64_CLV_UNALIGNEDl(n)          \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n)) ;\
  beq    Ldone##n                       ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)        ;\
Ldone##n:

#define PPC64_CLV_UNALIGNEDh(n)          \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n)) ;\
  beq    Ldone##n                       ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)        ;\
Ldone##n:

#else

#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

  subi 4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone;
  // the _vectorScalarRegisters may not be 16-byte aligned,
  // so copy via a red zone temp buffer
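  // Both the ELF and AIX ppc64 ABIs are assumed to keep r1 16-byte
  // aligned and to provide a red zone below the stack pointer (288
  // bytes on ELFv2), so r1 - 16 is a safe, aligned scratch slot;
  // unlike the 32-bit path below, no masking is needed here.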

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

#if defined(_AIX)
  // When GPR1 is set to a higher address, AIX discards the original stack
  // space below that address, which the new GPR1 value invalidates. Use
  // GPR0 to carry the context's GPR3 value across the stack switch. This
  // clobbers GPR0, which is a volatile register.
  ld 0, (8 * (3 + 2))(3)
#else
  PPC64_LR(0)
#endif
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
#if defined(_AIX)
  mr 3, 0
#else
  PPC64_LR(3)
#endif
  bctr

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// restore integral registers (rN is stored at offset 8 + 4 * N)
// skip r0 for now
// skip r1 for now
  lwz  2, 16(3)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  lwz  6, 32(3)
  lwz  7, 36(3)
  lwz  8, 40(3)
  lwz  9, 44(3)
  lwz 10, 48(3)
  lwz 11, 52(3)
  lwz 12, 56(3)
  lwz 13, 60(3)
  lwz 14, 64(3)
  lwz 15, 68(3)
  lwz 16, 72(3)
  lwz 17, 76(3)
  lwz 18, 80(3)
  lwz 19, 84(3)
  lwz 20, 88(3)
  lwz 21, 92(3)
  lwz 22, 96(3)
  lwz 23, 100(3)
  lwz 24, 104(3)
  lwz 25, 108(3)
  lwz 26, 112(3)
  lwz 27, 116(3)
  lwz 28, 120(3)
  lwz 29, 124(3)
  lwz 30, 128(3)
  lwz 31, 132(3)

#ifndef __NO_FPRS__
// restore float registers
  lfd  0, 160(3)
  lfd  1, 168(3)
  lfd  2, 176(3)
  lfd  3, 184(3)
  lfd  4, 192(3)
  lfd  5, 200(3)
  lfd  6, 208(3)
  lfd  7, 216(3)
  lfd  8, 224(3)
  lfd  9, 232(3)
  lfd 10, 240(3)
  lfd 11, 248(3)
  lfd 12, 256(3)
  lfd 13, 264(3)
  lfd 14, 272(3)
  lfd 15, 280(3)
  lfd 16, 288(3)
  lfd 17, 296(3)
  lfd 18, 304(3)
  lfd 19, 312(3)
  lfd 20, 320(3)
  lfd 21, 328(3)
  lfd 22, 336(3)
  lfd 23, 344(3)
  lfd 24, 352(3)
  lfd 25, 360(3)
  lfd 26, 368(3)
  lfd 27, 376(3)
  lfd 28, 384(3)
  lfd 29, 392(3)
  lfd 30, 400(3)
  lfd 31, 408(3)
#endif

#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)       \
  lwz 0, 424+_index*16(3)     SEPARATOR   \
  stw 0, 0(4)                 SEPARATOR   \
  lwz 0, 424+_index*16+4(3)   SEPARATOR   \
  stw 0, 4(4)                 SEPARATOR   \
  lwz 0, 424+_index*16+8(3)   SEPARATOR   \
  stw 0, 8(4)                 SEPARATOR   \
  lwz 0, 424+_index*16+12(3)  SEPARATOR   \
  stw 0, 12(4)                SEPARATOR   \
  lvx _index, 0, 4

#if !defined(_AIX)
// restore vector registers if any are in use. In the AIX ABI, VRSAVE
// is not used.
  lwz   5, 156(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define LOAD_VECTOR_UNALIGNEDl(_index)                  \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR  \
  beq    Ldone ## _index                     SEPARATOR  \
  LOAD_VECTOR_RESTORE(_index)                SEPARATOR  \
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                  \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR  \
  beq    Ldone ## _index                     SEPARATOR  \
  LOAD_VECTOR_RESTORE(_index)                SEPARATOR  \
Ldone ## _index:

#else

#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  subi   4, 1, 16
  rlwinm 4, 4, 0, 0, 27   // clear the low 4 bits
  // r4 is now a 16-byte aligned pointer into the red zone;
  // the _vectorRegisters may not be 16-byte aligned, so copy via a
  // red zone temp buffer
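  // rlwinm with shift 0 keeps IBM-numbered bits 0..27 (MSB first) and
  // zeroes the rest, i.e. it clears the 4 least-significant bits and
  // rounds the pointer down to a 16-byte boundary; e.g. 0x7ffffe2c
  // becomes 0x7ffffe20.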

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz   0, 136(3)   // __cr
  mtcr  0
  lwz   0, 148(3)   // __ctr
  mtctr 0
  lwz   0, 0(3)     // __srr0
  mtctr 0           // note: ctr is reused here as the branch target, so
                    // the __ctr value restored above is immediately replaced
  lwz   0, 8(3)     // do r0 now
  lwz   5, 28(3)    // do r5 now
  lwz   4, 24(3)    // do r4 now
  lwz   1, 12(3)    // do sp now
  lwz   3, 20(3)    // do r3 last
  bctr

#elif defined(__aarch64__)

#if defined(__ARM_FEATURE_GCS_DEFAULT)
  .arch_extension gcs
#endif

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
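// The offsets below assume the Registers_arm64 layout: x0..x30 in 8-byte
// slots starting at offset 0, sp at 0x0F8, pc at 0x100, and d0..d31
// stored as doubles starting at 0x110.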
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp x2, x3,   [x0, #0x010]
  ldp x4, x5,   [x0, #0x020]
  ldp x6, x7,   [x0, #0x030]
  ldp x8, x9,   [x0, #0x040]
  ldp x10, x11, [x0, #0x050]
  ldp x12, x13, [x0, #0x060]
  ldp x14, x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point
  // in restoring them.
  ldp x18, x19, [x0, #0x090]
  ldp x20, x21, [x0, #0x0A0]
  ldp x22, x23, [x0, #0x0B0]
  ldp x24, x25, [x0, #0x0C0]
  ldp x26, x27, [x0, #0x0D0]
  ldp x28, x29, [x0, #0x0E0]
  ldr x30,      [x0, #0x100]  // restore pc into lr
#if defined(__ARM_FP) && __ARM_FP != 0
  ldp d0, d1,   [x0, #0x110]
  ldp d2, d3,   [x0, #0x120]
  ldp d4, d5,   [x0, #0x130]
  ldp d6, d7,   [x0, #0x140]
  ldp d8, d9,   [x0, #0x150]
  ldp d10, d11, [x0, #0x160]
  ldp d12, d13, [x0, #0x170]
  ldp d14, d15, [x0, #0x180]
  ldp d16, d17, [x0, #0x190]
  ldp d18, d19, [x0, #0x1A0]
  ldp d20, d21, [x0, #0x1B0]
  ldp d22, d23, [x0, #0x1C0]
  ldp d24, d25, [x0, #0x1D0]
  ldp d26, d27, [x0, #0x1E0]
  ldp d28, d29, [x0, #0x1F0]
  ldr d30,      [x0, #0x200]
  ldr d31,      [x0, #0x208]
#endif
  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr x16,    [x0, #0x0F8]
  ldp x0, x1, [x0, #0x000]  // restore x0,x1
  mov sp, x16               // restore sp
#if defined(__ARM_FEATURE_GCS_DEFAULT)
  // If GCS is enabled we need to push the address we're returning to onto
  // the GCS. We can't just return using br, as there won't be a BTI landing
  // pad instruction at the destination.
  mov x16, #1
  chkfeat x16
  cbnz x16, Lnogcs
  gcspushm x30
Lnogcs:
#endif
  ret x30                   // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
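@ The offsets below assume the Registers_arm core layout: r0..r15 stored
@ as 4-byte words at offset 4 * i, so r8 is at #0x20, sp (r13) at #52,
@ and the saved pc (the r15 slot) at #60.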
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3          @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]   @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif


@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the library that way
  @ (we do not want the compiler to generate instructions that access those
  @ registers), but this code is only reached if the personality routine
  @ needs these registers, and their use implies they are actually available
  @ on the target, so it is safe to execute. The .fpu directive above lets
  @ the assembler encode the instruction here without changing how the rest
  @ of the library is compiled.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15}   @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8    @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8    @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8    @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8    @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8    @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8    @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8    @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8    @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8    @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8    @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8   @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8   @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8   @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8   @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8   @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8   @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4    @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4    @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4   @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4   @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

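# The offsets below assume rN is stored at offset 4 * N, with the saved
# pc at offset 128 (loaded into r9, the link register).
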
# restore integral registers
  l.lwz r0, 0(r3)
  l.lwz r1, 4(r3)
  l.lwz r2, 8(r3)
# skip r3 for now
  l.lwz r4, 16(r3)
  l.lwz r5, 20(r3)
  l.lwz r6, 24(r3)
  l.lwz r7, 28(r3)
  l.lwz r8, 32(r3)
# skip r9
  l.lwz r10, 40(r3)
  l.lwz r11, 44(r3)
  l.lwz r12, 48(r3)
  l.lwz r13, 52(r3)
  l.lwz r14, 56(r3)
  l.lwz r15, 60(r3)
  l.lwz r16, 64(r3)
  l.lwz r17, 68(r3)
  l.lwz r18, 72(r3)
  l.lwz r19, 76(r3)
  l.lwz r20, 80(r3)
  l.lwz r21, 84(r3)
  l.lwz r22, 88(r3)
  l.lwz r23, 92(r3)
  l.lwz r24, 96(r3)
  l.lwz r25, 100(r3)
  l.lwz r26, 104(r3)
  l.lwz r27, 108(r3)
  l.lwz r28, 112(r3)
  l.lwz r29, 116(r3)
  l.lwz r30, 120(r3)
  l.lwz r31, 124(r3)

# load new pc into ra
  l.lwz r9, 128(r3)

# at last, restore r3
  l.lwz r3, 12(r3)

# jump to pc
  l.jr r9
  l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
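# The offsets below assume rN is stored at offset 4 * N, with the
# predicate registers saved at #128 (moved into c4) and the resume
# address at #132 (loaded into r31 and jumped through).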
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
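// The offsets below assume the mips_o32 layout: GPR i stored as a 4-byte
// word at offset 4 * i, the saved pc at 4 * 32, hi at 4 * 33, lo at
// 4 * 34, and the FP registers as 8-byte doubles starting at 4 * 36.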
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
  // with 32-bit FPRs (FR=0), each ldc1 to an even register fills an
  // even/odd register pair
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
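// The offsets below assume the mips_newabi layout: GPR i stored as an
// 8-byte doubleword at offset 8 * i, the saved pc at 8 * 32, hi at
// 8 * 33, lo at 8 * 34, and the FP registers starting at 280 (8 * 35).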
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    ldc1 $f\i, (280+8*\i)($4)
  .endr
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    ld $\i, (8 * \i)($4)
  .endr
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
//
// void libunwind::Registers_sparc64::jumpto()
//
// On entry:
//  thread_state pointer is in %o0
//
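// The offsets below assume the Registers_sparc64 layout: %g0..%g7,
// %o0..%o7, %l0..%l7 and %i0..%i7 stored as 8-byte slots in that order
// starting at offset 0 (the %g0 slot is never reloaded, since %g0 is
// hardwired to zero).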
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  flushw                      // flush register windows to the stack
  ldx  [%o0 + 0x08], %g1
  ldx  [%o0 + 0x10], %g2
  ldx  [%o0 + 0x18], %g3
  ldx  [%o0 + 0x20], %g4
  ldx  [%o0 + 0x28], %g5
  ldx  [%o0 + 0x30], %g6
  ldx  [%o0 + 0x38], %g7
  ldx  [%o0 + 0x48], %o1
  ldx  [%o0 + 0x50], %o2
  ldx  [%o0 + 0x58], %o3
  ldx  [%o0 + 0x60], %o4
  ldx  [%o0 + 0x68], %o5
  ldx  [%o0 + 0x70], %o6
  ldx  [%o0 + 0x78], %o7
  ldx  [%o0 + 0x80], %l0
  ldx  [%o0 + 0x88], %l1
  ldx  [%o0 + 0x90], %l2
  ldx  [%o0 + 0x98], %l3
  ldx  [%o0 + 0xa0], %l4
  ldx  [%o0 + 0xa8], %l5
  ldx  [%o0 + 0xb0], %l6
  ldx  [%o0 + 0xb8], %l7
  ldx  [%o0 + 0xc0], %i0
  ldx  [%o0 + 0xc8], %i1
  ldx  [%o0 + 0xd0], %i2
  ldx  [%o0 + 0xd8], %i3
  ldx  [%o0 + 0xe0], %i4
  ldx  [%o0 + 0xe8], %i5
  ldx  [%o0 + 0xf0], %i6
  ldx  [%o0 + 0xf8], %i7
  jmp  %o7
  ldx  [%o0 + 0x40], %o0      // restore %o0 in the delay slot

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta  3                 // flush register windows
  ldd [%o0 + 64], %l0
  ldd [%o0 + 72], %l2
  ldd [%o0 + 80], %l4
  ldd [%o0 + 88], %l6
  ldd [%o0 + 96], %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60], %o7
  jmp %o7
  nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
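// The offsets below assume xN is stored at offset RISCV_ISIZE * N, with
// the saved pc in slot 0 (x0 is hardwired to zero, so its slot holds the
// resume address) and the FP registers at RISCV_FOFFSET + RISCV_FSIZE * i.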
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif

  // x0 is zero
  ILOAD x1, (RISCV_ISIZE * 0)(a0)   // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  // skip a0 for now
#if defined(__riscv_32e)
  .irp i,11,12,13,14,15
#else
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#endif
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0

  ret                               // jump to ra

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
//
// void libunwind::Registers_s390x::jumpto()
//
// On entry:
//  thread_state pointer is in r2
//

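// The offsets below assume the Registers_s390x layout: the PSW mask at
// offset 0 and the PSW address (resume address) at 8, GPRs %r0..%r15 at
// 16 + 8 * n, and FPRs %f0..%f15 at 144 + 8 * n.
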
  // Skip PSWM, but load PSWA into r1
  lg %r1, 8(%r2)

  // Restore FPRs
  .irp i,FROM_0_TO_15
    ld %f\i, (144+8*\i)(%r2)
  .endr

  // Restore GPRs - skipping %r0 and %r1
  lmg %r2, %r15, 32(%r2)

  // Return to PSWA (was loaded into %r1 above)
  br %r1

#elif defined(__loongarch__) && __loongarch_grlen == 64

//
// void libunwind::Registers_loongarch::jumpto()
//
// On entry:
//  thread_state pointer is in $a0 ($r4)
//
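// The offsets below assume $rN is stored at offset 8 * N, with the saved
// pc at 8 * 32 and the FP registers at 8 * 33 + 8 * i.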
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  // $r0 is zero
  .irp i,1,2,3
    ld.d $r\i, $a0, (8 * \i)
  .endr
  // skip $a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d $r\i, $a0, (8 * \i)
  .endr

  ld.d $ra, $a0, (8 * 32)   // load new pc into $ra
  ld.d $a0, $a0, (8 * 4)    // restore $a0 last

  jr $ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */

NO_EXEC_STACK_DIRECTIVE