1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * Linux/PA-RISC Project (http://www.parisc-linux.org/) |
4 | * |
5 | * kernel entry points (interruptions, system call wrappers) |
6 | * Copyright (C) 1999,2000 Philipp Rumpf |
7 | * Copyright (C) 1999 SuSE GmbH Nuernberg |
8 | * Copyright (C) 2000 Hewlett-Packard (John Marvin) |
9 | * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) |
10 | */ |
11 | |
12 | #include <asm/asm-offsets.h> |
13 | |
14 | /* We have the following options for acting on an interruption: |
15 |  * - handle it in assembly, using only the shadowed registers |
16 |  * - save registers to the kernel stack and handle it in assembly or C */ |
17 | |
18 | |
19 | #include <asm/psw.h> |
20 | #include <asm/cache.h> /* for L1_CACHE_SHIFT */ |
21 | #include <asm/assembly.h> /* for LDREG/STREG defines */ |
22 | #include <asm/signal.h> |
23 | #include <asm/unistd.h> |
24 | #include <asm/ldcw.h> |
25 | #include <asm/traps.h> |
26 | #include <asm/thread_info.h> |
27 | #include <asm/alternative.h> |
28 | #include <asm/spinlock_types.h> |
29 | |
30 | #include <linux/linkage.h> |
31 | #include <linux/pgtable.h> |
32 | |
33 | #ifdef CONFIG_64BIT |
34 | .level 2.0w |
35 | #else |
36 | .level 2.0 |
37 | #endif |
38 | |
39 | /* |
40 | * We need seven instructions after a TLB insert for it to take effect. |
41 | * The PA8800/PA8900 processors are an exception and need 12 instructions. |
42 | * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one. |
43 | */ |
44 | #ifdef CONFIG_64BIT |
45 | #define NUM_PIPELINE_INSNS 12 |
46 | #else |
47 | #define NUM_PIPELINE_INSNS 7 |
48 | #endif |
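 | /* These counts are consumed by the insert_nops uses below: the TLB miss |
 |  * handlers pad with nops so that at least NUM_PIPELINE_INSNS instructions |
 |  * (the rfir counting as one) follow each TLB insertion. */ |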
49 | |
50 | /* Insert num nops */ |
51 | .macro insert_nops num |
52 | .rept \num |
53 | nop |
54 | .endr |
55 | .endm |
56 | |
57 | /* Get aligned page_table_lock address for this mm from cr28/tr4 */ |
58 | .macro get_ptl reg |
59 | mfctl %cr28,\reg |
60 | .endm |
61 | |
62 | /* space_to_prot macro creates a prot id from a space id */ |
63 | |
64 | #if (SPACEID_SHIFT) == 0 |
65 | .macro space_to_prot spc prot |
66 | depd,z \spc,62,31,\prot |
67 | .endm |
68 | #else |
69 | .macro space_to_prot spc prot |
70 | extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot |
71 | .endm |
72 | #endif |
73 | /* |
74 | * The "get_stack" macros are responsible for determining the |
75 | * kernel stack value. |
76 | * |
77 | * If sr7 == 0 |
78 | * Already using a kernel stack, so call the |
79 | * get_stack_use_r30 macro to push a pt_regs structure |
80 | * on the stack, and store registers there. |
81 | * else |
82 | * Need to set up a kernel stack, so call the |
83 | * get_stack_use_cr30 macro to set up a pointer |
84 | * to the pt_regs structure contained within the |
85 | * task pointer pointed to by cr30. Load the stack |
86 | * pointer from the task structure. |
87 | * |
88 | * Note that we use shadowed registers for temps until |
89 | * we can save %r26 and %r29. %r26 is used to preserve |
90 | * %r8 (a shadowed register) which temporarily contained |
91 | * either the fault type ("code") or the eirr. We need |
92 | * to use a non-shadowed register to carry the value over |
93 | * the rfir in virt_map. We use %r26 since this value winds |
94 | * up being passed as the argument to either do_cpu_irq_mask |
95 |  * or handle_interruption. %r29 is used to hold a pointer to |
96 | * the register save area, and once again, it needs to |
97 | * be a non-shadowed register so that it survives the rfir. |
98 | */ |
99 | |
100 | .macro get_stack_use_cr30 |
101 | |
102 | /* we save the registers in the task struct */ |
103 | |
104 | copy %r30, %r17 |
105 | mfctl %cr30, %r1 |
106 | tophys %r1,%r9 /* task_struct */ |
107 | LDREG TASK_STACK(%r9),%r30 |
108 | ldo PT_SZ_ALGN(%r30),%r30 |
109 | mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */ |
110 | mtsp %r16,%sr3 |
111 | ldo TASK_REGS(%r9),%r9 |
112 | STREG %r17,PT_GR30(%r9) |
113 | STREG %r29,PT_GR29(%r9) |
114 | STREG %r26,PT_GR26(%r9) |
115 | STREG %r16,PT_SR7(%r9) |
116 | copy %r9,%r29 |
117 | .endm |
118 | |
119 | .macro get_stack_use_r30 |
120 | |
121 | /* we put a struct pt_regs on the stack and save the registers there */ |
122 | |
123 | tophys %r30,%r9 |
124 | copy %r30,%r1 |
125 | ldo PT_SZ_ALGN(%r30),%r30 |
126 | STREG %r1,PT_GR30(%r9) |
127 | STREG %r29,PT_GR29(%r9) |
128 | STREG %r26,PT_GR26(%r9) |
129 | STREG %r16,PT_SR7(%r9) |
130 | copy %r9,%r29 |
131 | .endm |
132 | |
133 | .macro rest_stack |
134 | LDREG PT_GR1(%r29), %r1 |
135 | LDREG PT_GR30(%r29),%r30 |
136 | LDREG PT_GR29(%r29),%r29 |
137 | .endm |
138 | |
139 | /* default interruption handler |
140 | * (calls traps.c:handle_interruption) */ |
141 | .macro def code |
142 | b intr_save |
143 | ldi \code, %r8 |
144 | .align 32 |
145 | .endm |
146 | |
147 | /* Interrupt interruption handler |
148 | * (calls irq.c:do_cpu_irq_mask) */ |
149 | .macro extint code |
150 | b intr_extint |
151 | mfsp %sr7,%r16 |
152 | .align 32 |
153 | .endm |
154 | |
155 | .import os_hpmc, code |
156 | |
157 | /* HPMC handler */ |
158 | .macro hpmc code |
159 | nop /* must be a NOP, will be patched later */ |
160 | load32 PA(os_hpmc), %r3 |
161 | bv,n 0(%r3) |
162 | nop |
163 | .word 0 /* checksum (will be patched) */ |
164 | .word 0 /* address of handler */ |
165 | .word 0 /* length of handler */ |
166 | .endm |
167 | |
168 | /* |
169 | * Performance Note: Instructions will be moved up into |
170 | * this part of the code later on, once we are sure |
171 | * that the tlb miss handlers are close to final form. |
172 | */ |
173 | |
174 | /* Register definitions for tlb miss handler macros */ |
175 | |
176 | va = r8 /* virtual address for which the trap occurred */ |
177 | spc = r24 /* space for which the trap occurred */ |
178 | |
179 | #ifndef CONFIG_64BIT |
180 | |
181 | /* |
182 | * itlb miss interruption handler (parisc 1.1 - 32 bit) |
183 | */ |
184 | |
185 | .macro itlb_11 code |
186 | |
187 | mfctl %pcsq, spc |
188 | b itlb_miss_11 |
189 | mfctl %pcoq, va |
190 | |
191 | .align 32 |
192 | .endm |
193 | #endif |
194 | |
195 | /* |
196 | * itlb miss interruption handler (parisc 2.0) |
197 | */ |
198 | |
199 | .macro itlb_20 code |
200 | mfctl %pcsq, spc |
201 | #ifdef CONFIG_64BIT |
202 | b itlb_miss_20w |
203 | #else |
204 | b itlb_miss_20 |
205 | #endif |
206 | mfctl %pcoq, va |
207 | |
208 | .align 32 |
209 | .endm |
210 | |
211 | #ifndef CONFIG_64BIT |
212 | /* |
213 | * naitlb miss interruption handler (parisc 1.1 - 32 bit) |
214 | */ |
215 | |
216 | .macro naitlb_11 code |
217 | |
218 | mfctl %isr,spc |
219 | b naitlb_miss_11 |
220 | mfctl %ior,va |
221 | |
222 | .align 32 |
223 | .endm |
224 | #endif |
225 | |
226 | /* |
227 | * naitlb miss interruption handler (parisc 2.0) |
228 | */ |
229 | |
230 | .macro naitlb_20 code |
231 | |
232 | mfctl %isr,spc |
233 | #ifdef CONFIG_64BIT |
234 | b naitlb_miss_20w |
235 | #else |
236 | b naitlb_miss_20 |
237 | #endif |
238 | mfctl %ior,va |
239 | |
240 | .align 32 |
241 | .endm |
242 | |
243 | #ifndef CONFIG_64BIT |
244 | /* |
245 | * dtlb miss interruption handler (parisc 1.1 - 32 bit) |
246 | */ |
247 | |
248 | .macro dtlb_11 code |
249 | |
250 | mfctl %isr, spc |
251 | b dtlb_miss_11 |
252 | mfctl %ior, va |
253 | |
254 | .align 32 |
255 | .endm |
256 | #endif |
257 | |
258 | /* |
259 | * dtlb miss interruption handler (parisc 2.0) |
260 | */ |
261 | |
262 | .macro dtlb_20 code |
263 | |
264 | mfctl %isr, spc |
265 | #ifdef CONFIG_64BIT |
266 | b dtlb_miss_20w |
267 | #else |
268 | b dtlb_miss_20 |
269 | #endif |
270 | mfctl %ior, va |
271 | |
272 | .align 32 |
273 | .endm |
274 | |
275 | #ifndef CONFIG_64BIT |
276 | /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */ |
277 | |
278 | .macro nadtlb_11 code |
279 | |
280 | mfctl %isr,spc |
281 | b nadtlb_miss_11 |
282 | mfctl %ior,va |
283 | |
284 | .align 32 |
285 | .endm |
286 | #endif |
287 | |
288 | /* nadtlb miss interruption handler (parisc 2.0) */ |
289 | |
290 | .macro nadtlb_20 code |
291 | |
292 | mfctl %isr,spc |
293 | #ifdef CONFIG_64BIT |
294 | b nadtlb_miss_20w |
295 | #else |
296 | b nadtlb_miss_20 |
297 | #endif |
298 | mfctl %ior,va |
299 | |
300 | .align 32 |
301 | .endm |
302 | |
303 | #ifndef CONFIG_64BIT |
304 | /* |
305 | * dirty bit trap interruption handler (parisc 1.1 - 32 bit) |
306 | */ |
307 | |
308 | .macro dbit_11 code |
309 | |
310 | mfctl %isr,spc |
311 | b dbit_trap_11 |
312 | mfctl %ior,va |
313 | |
314 | .align 32 |
315 | .endm |
316 | #endif |
317 | |
318 | /* |
319 | * dirty bit trap interruption handler (parisc 2.0) |
320 | */ |
321 | |
322 | .macro dbit_20 code |
323 | |
324 | mfctl %isr,spc |
325 | #ifdef CONFIG_64BIT |
326 | b dbit_trap_20w |
327 | #else |
328 | b dbit_trap_20 |
329 | #endif |
330 | mfctl %ior,va |
331 | |
332 | .align 32 |
333 | .endm |
334 | |
335 | /* In LP64, the space contains part of the upper 32 bits of the |
336 | * fault. We have to extract this and place it in the va, |
337 | * zeroing the corresponding bits in the space register */ |
338 | .macro space_adjust spc,va,tmp |
339 | #ifdef CONFIG_64BIT |
340 | extrd,u \spc,63,SPACEID_SHIFT,\tmp |
341 | depd %r0,63,SPACEID_SHIFT,\spc |
342 | depd \tmp,31,SPACEID_SHIFT,\va |
343 | #endif |
344 | .endm |
345 | |
346 | .import swapper_pg_dir,code |
347 | |
348 | /* Get the pgd. For faults on space zero (kernel space), this |
349 | * is simply swapper_pg_dir. For user space faults, the |
350 | * pgd is stored in %cr25 */ |
351 | .macro get_pgd spc,reg |
352 | ldil L%PA(swapper_pg_dir),\reg |
353 | ldo R%PA(swapper_pg_dir)(\reg),\reg |
354 | or,COND(=) %r0,\spc,%r0 |
355 | mfctl %cr25,\reg |
356 | .endm |
357 | |
358 | /* |
359 | space_check(spc,tmp,fault) |
360 | |
361 | spc - The space we saw the fault with. |
362 | tmp - The place to store the current space. |
363 | fault - Function to call on failure. |
364 | |
365 | Only allow faults on different spaces from the |
366 | currently active one if we're the kernel |
367 | |
368 | */ |
369 | .macro space_check spc,tmp,fault |
370 | mfsp %sr7,\tmp |
371 | /* check against %r0 which is the same value as LINUX_GATEWAY_SPACE */ |
372 | or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page |
373 | * as kernel, so defeat the space |
374 | * check if it is */ |
375 | copy \spc,\tmp |
376 | or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */ |
377 | cmpb,COND(<>),n \tmp,\spc,\fault |
378 | .endm |
379 | |
380 | /* Look up a PTE in a 2-Level scheme (faulting at each |
381 |  * level if the entry isn't present) |
382 | * |
383 | * NOTE: we use ldw even for LP64, since the short pointers |
384 | * can address up to 1TB |
385 | */ |
386 | .macro L2_ptep pmd,pte,index,va,fault |
387 | #if CONFIG_PGTABLE_LEVELS == 3 |
388 | extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index |
389 | #else |
390 | extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index |
391 | #endif |
392 | dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ |
393 | #if CONFIG_PGTABLE_LEVELS < 3 |
394 | copy %r0,\pte |
395 | #endif |
396 | ldw,s \index(\pmd),\pmd |
397 | bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault |
398 | dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ |
399 | SHLREG \pmd,PxD_VALUE_SHIFT,\pmd |
400 | extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index |
401 | dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ |
402 | shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ |
403 | .endm |
404 | |
405 | /* Look up PTE in a 3-Level scheme. */ |
406 | .macro L3_ptep pgd,pte,index,va,fault |
407 | #if CONFIG_PGTABLE_LEVELS == 3 |
408 | copy %r0,\pte |
409 | extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index |
410 | ldw,s \index(\pgd),\pgd |
411 | bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault |
412 | shld \pgd,PxD_VALUE_SHIFT,\pgd |
413 | #endif |
414 | L2_ptep \pgd,\pte,\index,\va,\fault |
415 | .endm |
416 | |
417 | /* Acquire page_table_lock and check page is present. */ |
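 | /* Roughly: kernel-space faults (spc == 0) skip the lock entirely; user-space |
 |  * faults spin with ldcw on the per-mm lock word obtained from cr28, then read |
 |  * the PTE under the lock. If the page is not present, the lock is dropped and |
 |  * we branch to \fault. */ |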
418 | .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault |
419 | #ifdef CONFIG_TLB_PTLOCK |
420 | 98: cmpib,COND(=),n 0,\spc,2f |
421 | get_ptl \tmp |
422 | 1: LDCW 0(\tmp),\tmp1 |
423 | cmpib,COND(=) 0,\tmp1,1b |
424 | nop |
425 | LDREG 0(\ptp),\pte |
426 | bb,<,n \pte,_PAGE_PRESENT_BIT,3f |
427 | b \fault |
428 | stw \tmp1,0(\tmp) |
429 | 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) |
430 | #endif |
431 | 2: LDREG 0(\ptp),\pte |
432 | bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault |
433 | 3: |
434 | .endm |
435 | |
436 | /* Release the page_table_lock if the fault was from user space. We use |
437 |    an ordered store to ensure all prior accesses are performed before |
438 |    the lock is released. Note the stw may not be executed, so we |
439 |    provide one extra nop when CONFIG_TLB_PTLOCK is defined. */ |
440 | .macro ptl_unlock spc,tmp,tmp2 |
441 | #ifdef CONFIG_TLB_PTLOCK |
442 | 98: get_ptl \tmp |
443 | ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2 |
444 | or,COND(=) %r0,\spc,%r0 |
445 | stw,ma \tmp2,0(\tmp) |
446 | 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) |
447 | insert_nops NUM_PIPELINE_INSNS - 4 |
448 | #else |
449 | insert_nops NUM_PIPELINE_INSNS - 1 |
450 | #endif |
451 | .endm |
452 | |
453 | /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and |
454 | * don't needlessly dirty the cache line if it was already set */ |
455 | .macro update_accessed ptp,pte,tmp,tmp1 |
456 | ldi _PAGE_ACCESSED,\tmp1 |
457 | or \tmp1,\pte,\tmp |
458 | and,COND(<>) \tmp1,\pte,%r0 |
459 | STREG \tmp,0(\ptp) |
460 | .endm |
461 | |
462 | /* Set the dirty bit (and accessed bit). No need to be |
463 | * clever, this is only used from the dirty fault */ |
464 | .macro update_dirty ptp,pte,tmp |
465 | ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp |
466 | or \tmp,\pte,\pte |
467 | STREG \pte,0(\ptp) |
468 | .endm |
469 | |
470 | /* We have (depending on the page size): |
471 | * - 38 to 52-bit Physical Page Number |
472 | * - 12 to 26-bit page offset |
473 | */ |
474 | /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) |
475 | * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ |
476 | #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) |
477 | #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) |
478 | #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT) |
479 | |
480 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
481 | .macro convert_for_tlb_insert20 pte,tmp |
482 | #ifdef CONFIG_HUGETLB_PAGE |
483 | copy \pte,\tmp |
484 | extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte |
485 | |
486 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ |
487 | (63-58)+PAGE_ADD_SHIFT,\pte |
488 | extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 |
489 | depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ |
490 | (63-58)+PAGE_ADD_HUGE_SHIFT,\pte |
491 | #else /* Huge pages disabled */ |
492 | extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte |
493 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ |
494 | (63-58)+PAGE_ADD_SHIFT,\pte |
495 | #endif |
496 | .endm |
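 | /* On exit from convert_for_tlb_insert20, \pte holds the physical page number |
 |  * shifted into the position iitlbt/idtlbt expect, with the page-size encoding |
 |  * (normal or huge) deposited in the low bits. */ |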
497 | |
498 | /* Convert the pte and prot to tlb insertion values. How |
499 | * this happens is quite subtle, read below */ |
500 | .macro make_insert_tlb spc,pte,prot,tmp |
501 | space_to_prot \spc \prot /* create prot id from space */ |
502 | /* The following is the real subtlety. This is depositing |
503 | * T <-> _PAGE_REFTRAP |
504 | * D <-> _PAGE_DIRTY |
505 | * B <-> _PAGE_DMB (memory break) |
506 | * |
507 | * Then incredible subtlety: The access rights are |
508 | * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE |
509 | * See 3-14 of the parisc 2.0 manual |
510 | * |
511 | * Finally, _PAGE_READ goes in the top bit of PL1 (so we |
512 | * trigger an access rights trap in user space if the user |
513 |  * tries to read an unreadable page). */ |
514 | #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT |
515 | /* need to drop DMB bit, as it's used as SPECIAL flag */ |
516 | depi 0,_PAGE_SPECIAL_BIT,1,\pte |
517 | #endif |
518 | depd \pte,8,7,\prot |
519 | |
520 | /* PAGE_USER indicates the page can be read with user privileges, |
521 | * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 |
522 | * contains _PAGE_READ) */ |
523 | extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 |
524 | depdi 7,11,3,\prot |
525 | /* If we're a gateway page, drop PL2 back to zero for promotion |
526 | * to kernel privilege (so we can execute the page as kernel). |
527 |  * Any privilege promotion page always denies read and write. */ |
528 | extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0 |
529 | depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */ |
530 | |
531 | /* Enforce uncacheable pages. |
532 |  * This should ONLY be used for MMIO on PA 2.0 machines. |
533 |  * Memory/DMA is cache coherent on all PA2.0 machines we support |
534 |  * (that means T-class is NOT supported) and the memory controllers |
535 |  * on most of those machines only handle cache transactions. |
536 | */ |
537 | extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 |
538 | depdi 1,12,1,\prot |
539 | |
540 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
541 | convert_for_tlb_insert20 \pte \tmp |
542 | .endm |
543 | |
544 | /* Identical macro to make_insert_tlb above, except it |
545 | * makes the tlb entry for the differently formatted pa11 |
546 | * insertion instructions */ |
547 | .macro make_insert_tlb_11 spc,pte,prot |
548 | #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT |
549 | /* need to drop DMB bit, as it's used as SPECIAL flag */ |
550 | depi 0,_PAGE_SPECIAL_BIT,1,\pte |
551 | #endif |
552 | zdep \spc,30,15,\prot |
553 | dep \pte,8,7,\prot |
554 | extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 |
555 | depi 1,12,1,\prot |
556 | extru,= \pte,_PAGE_USER_BIT,1,%r0 |
557 | depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */ |
558 | extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0 |
559 | depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */ |
560 | |
561 | /* Get rid of prot bits and convert to page addr for iitlba */ |
562 | |
563 | depi 0,31,ASM_PFN_PTE_SHIFT,\pte |
564 | SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte |
565 | .endm |
566 | |
567 | /* This is for ILP32 PA2.0 only. The TLB insertion needs |
568 | * to extend into I/O space if the address is 0xfXXXXXXX |
569 | * so we extend the f's into the top word of the pte in |
570 | * this case */ |
571 | .macro f_extend pte,tmp |
572 | extrd,s \pte,42,4,\tmp |
573 | addi,<> 1,\tmp,%r0 |
574 | extrd,s \pte,63,25,\pte |
575 | .endm |
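 | /* The addi,<> above nullifies the sign-extension unless the extracted nibble |
 |  * is all ones, i.e. unless the address really is of the form 0xfXXXXXXX. */ |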
576 | |
577 | /* The alias region consists of a pair of 4 MB regions |
578 | * aligned to 8 MB. It is used to clear/copy/flush user pages |
579 | * using kernel virtual addresses congruent with the user |
580 | * virtual address. |
581 | * |
582 |  * To use the alias page, set %r26 up with the "to" TLB |
583 |  * entry (identifying the physical page) and %r23 up with |
584 |  * the "from" TLB entry (or nothing if there is only a "to" |
585 |  * entry---for clear_user_page_asm). */ |
586 | .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype |
587 | cmpib,COND(<>),n 0,\spc,\fault |
588 | ldil L%(TMPALIAS_MAP_START),\tmp |
589 | copy \va,\tmp1 |
590 | depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1 |
591 | cmpb,COND(<>),n \tmp,\tmp1,\fault |
592 | mfctl %cr19,\tmp /* iir */ |
593 | /* get the opcode (first six bits) into \tmp */ |
594 | extrw,u \tmp,5,6,\tmp |
595 | /* |
596 |  * Only setting the T bit prevents data cache movein. |
597 |  * Setting access rights to zero prevents instruction cache movein. |
598 |  * |
599 |  * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go |
600 |  * to the type field and _PAGE_READ goes to the top bit of PL1. |
601 | */ |
602 | ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot |
603 | /* |
604 |  * So if the opcode is one (i.e. this is a memory management |
605 |  * instruction), nullify the next load so \prot is only T. |
606 |  * Otherwise this is a normal data operation. |
607 | */ |
608 | cmpiclr,= 0x01,\tmp,%r0 |
609 | ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot |
610 | .ifc \patype,20 |
611 | depd,z \prot,8,7,\prot |
612 | .else |
613 | .ifc \patype,11 |
614 | depw,z \prot,8,7,\prot |
615 | .else |
616 | .error "undefined PA type to do_alias" |
617 | .endif |
618 | .endif |
619 | /* |
620 | * OK, it is in the temp alias region, check whether "from" or "to". |
621 | * Check "subtle" note in pacache.S re: r23/r26. |
622 | */ |
623 | extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0 |
624 | or,COND(tr) %r23,%r0,\pte |
625 | or %r26,%r0,\pte |
626 | |
627 | /* convert phys addr in \pte (from r23 or r26) to tlb insert format */ |
628 | SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte |
629 | depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte |
630 | .endm |
631 | |
632 | |
633 | /* |
634 |  * Fault vectors are architecturally required to be aligned on a 2K |
635 |  * boundary. |
636 | */ |
637 | |
638 | .section .text.hot |
639 | .align 2048 |
640 | |
641 | ENTRY(fault_vector_20) |
642 | /* First vector is invalid (0) */ |
643 | .ascii "cows can fly" |
644 | .byte 0 |
645 | .align 32 |
646 | |
647 | hpmc 1 |
648 | def 2 |
649 | def 3 |
650 | extint 4 |
651 | def 5 |
652 | itlb_20 PARISC_ITLB_TRAP |
653 | def 7 |
654 | def 8 |
655 | def 9 |
656 | def 10 |
657 | def 11 |
658 | def 12 |
659 | def 13 |
660 | def 14 |
661 | dtlb_20 15 |
662 | naitlb_20 16 |
663 | nadtlb_20 17 |
664 | def 18 |
665 | def 19 |
666 | dbit_20 20 |
667 | def 21 |
668 | def 22 |
669 | def 23 |
670 | def 24 |
671 | def 25 |
672 | def 26 |
673 | def 27 |
674 | def 28 |
675 | def 29 |
676 | def 30 |
677 | def 31 |
678 | END(fault_vector_20) |
679 | |
680 | #ifndef CONFIG_64BIT |
681 | |
682 | .align 2048 |
683 | |
684 | ENTRY(fault_vector_11) |
685 | /* First vector is invalid (0) */ |
686 | .ascii "cows can fly" |
687 | .byte 0 |
688 | .align 32 |
689 | |
690 | hpmc 1 |
691 | def 2 |
692 | def 3 |
693 | extint 4 |
694 | def 5 |
695 | itlb_11 PARISC_ITLB_TRAP |
696 | def 7 |
697 | def 8 |
698 | def 9 |
699 | def 10 |
700 | def 11 |
701 | def 12 |
702 | def 13 |
703 | def 14 |
704 | dtlb_11 15 |
705 | naitlb_11 16 |
706 | nadtlb_11 17 |
707 | def 18 |
708 | def 19 |
709 | dbit_11 20 |
710 | def 21 |
711 | def 22 |
712 | def 23 |
713 | def 24 |
714 | def 25 |
715 | def 26 |
716 | def 27 |
717 | def 28 |
718 | def 29 |
719 | def 30 |
720 | def 31 |
721 | END(fault_vector_11) |
722 | |
723 | #endif |
724 | /* Fault vector is separately protected and *must* be on its own page */ |
725 | .align PAGE_SIZE |
726 | |
727 | .import handle_interruption,code |
728 | .import do_cpu_irq_mask,code |
729 | |
730 | /* |
731 |  * Child returns here |
732 |  * |
733 |  * copy_thread moved args into the task save area. |
734 | */ |
735 | |
736 | ENTRY(ret_from_kernel_thread) |
737 | /* Call schedule_tail first though */ |
738 | BL schedule_tail, %r2 |
739 | nop |
740 | |
741 | mfctl %cr30,%r1 /* task_struct */ |
742 | LDREG TASK_PT_GR25(%r1), %r26 |
743 | #ifdef CONFIG_64BIT |
744 | LDREG TASK_PT_GR27(%r1), %r27 |
745 | #endif |
746 | LDREG TASK_PT_GR26(%r1), %r1 |
747 | ble 0(%sr7, %r1) |
748 | copy %r31, %r2 |
749 | b finish_child_return |
750 | nop |
751 | END(ret_from_kernel_thread) |
752 | |
753 | |
754 | /* |
755 | * struct task_struct *_switch_to(struct task_struct *prev, |
756 | * struct task_struct *next) |
757 | * |
758 | * switch kernel stacks and return prev */ |
759 | ENTRY_CFI(_switch_to) |
760 | STREG %r2, -RP_OFFSET(%r30) |
761 | |
762 | callee_save_float |
763 | callee_save |
764 | |
765 | load32 _switch_to_ret, %r2 |
766 | |
767 | STREG %r2, TASK_PT_KPC(%r26) |
768 | LDREG TASK_PT_KPC(%r25), %r2 |
769 | |
770 | STREG %r30, TASK_PT_KSP(%r26) |
771 | LDREG TASK_PT_KSP(%r25), %r30 |
772 | bv %r0(%r2) |
773 | mtctl %r25,%cr30 /* point cr30 at the next task (executes in the branch delay slot) */ |
774 | |
775 | ENTRY(_switch_to_ret) |
776 | mtctl %r0, %cr0 /* Needed for single stepping */ |
777 | callee_rest |
778 | callee_rest_float |
779 | |
780 | LDREG -RP_OFFSET(%r30), %r2 |
781 | bv %r0(%r2) |
782 | copy %r26, %r28 |
783 | ENDPROC_CFI(_switch_to) |
784 | |
785 | /* |
786 | * Common rfi return path for interruptions, kernel execve, and |
787 | * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will |
788 | * return via this path if the signal was received when the process |
789 | * was running; if the process was blocked on a syscall then the |
790 |  * normal syscall_exit path is used. All syscalls for traced |
791 |  * processes exit via intr_restore. |
792 |  * |
793 |  * XXX If any syscalls that change a process's space id ever exit |
794 |  * this way, then we will need to copy %sr3 into PT_SR[3..7], and |
795 | * adjust IASQ[0..1]. |
796 | * |
797 | */ |
798 | |
799 | .align PAGE_SIZE |
800 | |
801 | ENTRY_CFI(syscall_exit_rfi) |
802 | mfctl %cr30,%r16 /* task_struct */ |
803 | ldo TASK_REGS(%r16),%r16 |
804 | /* Force iaoq to userspace, as the user has had access to our current |
805 |  * context via sigcontext. Also filter the PSW for the same reason. |
806 | */ |
807 | LDREG PT_IAOQ0(%r16),%r19 |
808 | depi PRIV_USER,31,2,%r19 |
809 | STREG %r19,PT_IAOQ0(%r16) |
810 | LDREG PT_IAOQ1(%r16),%r19 |
811 | depi PRIV_USER,31,2,%r19 |
812 | STREG %r19,PT_IAOQ1(%r16) |
813 | LDREG PT_PSW(%r16),%r19 |
814 | load32 USER_PSW_MASK,%r1 |
815 | #ifdef CONFIG_64BIT |
816 | load32 USER_PSW_HI_MASK,%r20 |
817 | depd %r20,31,32,%r1 |
818 | #endif |
819 | and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */ |
820 | load32 USER_PSW,%r1 |
821 | or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */ |
822 | STREG %r19,PT_PSW(%r16) |
823 | |
824 | /* |
825 | * If we aren't being traced, we never saved space registers |
826 | * (we don't store them in the sigcontext), so set them |
827 | * to "proper" values now (otherwise we'll wind up restoring |
828 | * whatever was last stored in the task structure, which might |
829 | * be inconsistent if an interrupt occurred while on the gateway |
830 | * page). Note that we may be "trashing" values the user put in |
831 | * them, but we don't support the user changing them. |
832 | */ |
833 | |
834 | STREG %r0,PT_SR2(%r16) |
835 | mfsp %sr3,%r19 |
836 | STREG %r19,PT_SR0(%r16) |
837 | STREG %r19,PT_SR1(%r16) |
838 | STREG %r19,PT_SR3(%r16) |
839 | STREG %r19,PT_SR4(%r16) |
840 | STREG %r19,PT_SR5(%r16) |
841 | STREG %r19,PT_SR6(%r16) |
842 | STREG %r19,PT_SR7(%r16) |
843 | |
844 | ENTRY(intr_return) |
845 | /* check for reschedule */ |
846 | mfctl %cr30,%r1 |
847 | LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ |
848 | bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ |
849 | |
850 | .import do_notify_resume,code |
851 | intr_check_sig: |
852 | /* As above */ |
853 | mfctl %cr30,%r1 |
854 | LDREG TASK_TI_FLAGS(%r1),%r19 |
855 | ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20 |
856 | and,COND(<>) %r19, %r20, %r0 |
857 | b,n intr_restore /* skip past if we've nothing to do */ |
858 | |
859 | /* This check is critical to having LWS |
860 | * working. The IASQ is zero on the gateway |
861 | * page and we cannot deliver any signals until |
862 | * we get off the gateway page. |
863 | * |
864 | * Only do signals if we are returning to user space |
865 | */ |
866 | LDREG PT_IASQ0(%r16), %r20 |
867 | cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ |
868 | LDREG PT_IASQ1(%r16), %r20 |
869 | cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ |
870 | |
871 | copy %r0, %r25 /* long in_syscall = 0 */ |
872 | #ifdef CONFIG_64BIT |
873 | ldo -16(%r30),%r29 /* Reference param save area */ |
874 | #endif |
875 | |
876 | /* NOTE: We need to enable interrupts if we have to deliver |
877 | * signals. We used to do this earlier but it caused kernel |
878 | * stack overflows. */ |
879 | ssm PSW_SM_I, %r0 |
880 | |
881 | BL do_notify_resume,%r2 |
882 | copy %r16, %r26 /* struct pt_regs *regs */ |
883 | |
884 | b,n intr_check_sig |
885 | |
886 | intr_restore: |
887 | copy %r16,%r29 |
888 | ldo PT_FR31(%r29),%r1 |
889 | rest_fp %r1 |
890 | rest_general %r29 |
891 | |
892 | /* inverse of virt_map */ |
893 | pcxt_ssm_bug |
894 | rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ |
895 | tophys_r1 %r29 |
896 | |
897 | /* Restore space id's and special cr's from PT_REGS |
898 | * structure pointed to by r29 |
899 | */ |
900 | rest_specials %r29 |
901 | |
902 | /* IMPORTANT: rest_stack restores r29 last (we are using it)! |
903 | * It also restores r1 and r30. |
904 | */ |
905 | rest_stack |
906 | |
907 | rfi |
908 | nop |
909 | |
910 | #ifndef CONFIG_PREEMPTION |
911 | # define intr_do_preempt intr_restore |
912 | #endif /* !CONFIG_PREEMPTION */ |
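 | /* Without CONFIG_PREEMPTION, the kernel-mode case of intr_do_resched simply |
 |  * falls back to intr_restore via the define above; only returns to user |
 |  * space can reschedule. */ |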
913 | |
914 | .import schedule,code |
915 | intr_do_resched: |
916 | /* Only call schedule on return to userspace. If we're returning |
917 |  * to kernel space, we may schedule if CONFIG_PREEMPTION is enabled; |
918 |  * otherwise, we jump back to intr_restore. |
919 | */ |
920 | LDREG PT_IASQ0(%r16), %r20 |
921 | cmpib,COND(=) 0, %r20, intr_do_preempt |
922 | nop |
923 | LDREG PT_IASQ1(%r16), %r20 |
924 | cmpib,COND(=) 0, %r20, intr_do_preempt |
925 | nop |
926 | |
927 | /* NOTE: We need to enable interrupts if we schedule. We used |
928 | * to do this earlier but it caused kernel stack overflows. */ |
929 | ssm PSW_SM_I, %r0 |
930 | |
931 | #ifdef CONFIG_64BIT |
932 | ldo -16(%r30),%r29 /* Reference param save area */ |
933 | #endif |
934 | |
935 | ldil L%intr_check_sig, %r2 |
936 | #ifndef CONFIG_64BIT |
937 | b schedule |
938 | #else |
939 | load32 schedule, %r20 |
940 | bv %r0(%r20) |
941 | #endif |
942 | ldo R%intr_check_sig(%r2), %r2 |
943 | |
944 | /* Preempt the current task on returning to kernel |
945 |  * mode from an interrupt, iff need_resched is set |
946 |  * and preempt_count is 0. Otherwise, we continue on |
947 |  * our merry way back to the currently running task. |
948 | */ |
949 | #ifdef CONFIG_PREEMPTION |
950 | .import preempt_schedule_irq,code |
951 | intr_do_preempt: |
952 | rsm PSW_SM_I, %r0 /* disable interrupts */ |
953 | |
954 | /* current_thread_info()->preempt_count */ |
955 | mfctl %cr30, %r1 |
956 | ldw TI_PRE_COUNT(%r1), %r19 |
957 | cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */ |
958 | nop /* prev insn branched backwards */ |
959 | |
960 | /* check if we interrupted a critical path */ |
961 | LDREG PT_PSW(%r16), %r20 |
962 | bb,<,n %r20, 31 - PSW_SM_I, intr_restore |
963 | nop |
964 | |
965 | /* ssm PSW_SM_I done later in intr_restore */ |
966 | #ifdef CONFIG_MLONGCALLS |
967 | ldil L%intr_restore, %r2 |
968 | load32 preempt_schedule_irq, %r1 |
969 | bv %r0(%r1) |
970 | ldo R%intr_restore(%r2), %r2 |
971 | #else |
972 | ldil L%intr_restore, %r1 |
973 | BL preempt_schedule_irq, %r2 |
974 | ldo R%intr_restore(%r1), %r2 |
975 | #endif |
976 | #endif /* CONFIG_PREEMPTION */ |
977 | |
978 | /* |
979 | * External interrupts. |
980 | */ |
981 | |
982 | intr_extint: |
983 | cmpib,COND(=),n 0,%r16,1f |
984 | |
985 | get_stack_use_cr30 |
986 | b,n 2f |
987 | |
988 | 1: |
989 | get_stack_use_r30 |
990 | 2: |
991 | save_specials %r29 |
992 | virt_map |
993 | save_general %r29 |
994 | |
995 | ldo PT_FR0(%r29), %r24 |
996 | save_fp %r24 |
997 | |
998 | loadgp |
999 | |
1000 | copy %r29, %r26 /* arg0 is pt_regs */ |
1001 | copy %r29, %r16 /* save pt_regs */ |
1002 | |
1003 | ldil L%intr_return, %r2 |
1004 | |
1005 | #ifdef CONFIG_64BIT |
1006 | ldo -16(%r30),%r29 /* Reference param save area */ |
1007 | #endif |
1008 | |
1009 | b do_cpu_irq_mask |
1010 | ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ |
1011 | ENDPROC_CFI(syscall_exit_rfi) |
1012 | |
1013 | |
1014 | /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ |
1015 | |
1016 | ENTRY_CFI(intr_save) /* for os_hpmc */ |
1017 | mfsp %sr7,%r16 |
1018 | cmpib,COND(=),n 0,%r16,1f |
1019 | get_stack_use_cr30 |
1020 | b 2f |
1021 | copy %r8,%r26 |
1022 | |
1023 | 1: |
1024 | get_stack_use_r30 |
1025 | copy %r8,%r26 |
1026 | |
1027 | 2: |
1028 | save_specials %r29 |
1029 | |
1030 | /* If this trap is an itlb miss, skip saving/adjusting isr/ior */ |
1031 | cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior |
1032 | |
1033 | |
1034 | mfctl %isr, %r16 |
1035 | nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ |
1036 | mfctl %ior, %r17 |
1037 | |
1038 | |
1039 | #ifdef CONFIG_64BIT |
1040 | /* |
1041 | * If the interrupted code was running with W bit off (32 bit), |
1042 | * clear the b bits (bits 0 & 1) in the ior. |
1043 | * save_specials left ipsw value in r8 for us to test. |
1044 | */ |
1045 | extrd,u,*<> %r8,PSW_W_BIT,1,%r0 |
1046 | depdi 0,1,2,%r17 |
1047 | |
1048 | /* adjust isr/ior: get high bits from isr and deposit in ior */ |
1049 | space_adjust %r16,%r17,%r1 |
1050 | #endif |
1051 | STREG %r16, PT_ISR(%r29) |
1052 | STREG %r17, PT_IOR(%r29) |
1053 | |
1054 | #if 0 && defined(CONFIG_64BIT) |
1055 | /* Revisit when we have 64-bit code above 4Gb */ |
1056 | b,n intr_save2 |
1057 | |
1058 | skip_save_ior: |
1059 | /* We have an itlb miss, and when executing code above 4 Gb on ILP64, we |
1060 | * need to adjust iasq/iaoq here in the same way we adjusted isr/ior |
1061 | * above. |
1062 | */ |
1063 | extrd,u,* %r8,PSW_W_BIT,1,%r1 |
1064 | cmpib,COND(=),n 1,%r1,intr_save2 |
1065 | LDREG PT_IASQ0(%r29), %r16 |
1066 | LDREG PT_IAOQ0(%r29), %r17 |
1067 | /* adjust iasq/iaoq */ |
1068 | space_adjust %r16,%r17,%r1 |
1069 | STREG %r16, PT_IASQ0(%r29) |
1070 | STREG %r17, PT_IAOQ0(%r29) |
1071 | #else |
1072 | skip_save_ior: |
1073 | #endif |
1074 | |
1075 | intr_save2: |
1076 | virt_map |
1077 | save_general %r29 |
1078 | |
1079 | ldo PT_FR0(%r29), %r25 |
1080 | save_fp %r25 |
1081 | |
1082 | loadgp |
1083 | |
1084 | copy %r29, %r25 /* arg1 is pt_regs */ |
1085 | #ifdef CONFIG_64BIT |
1086 | ldo -16(%r30),%r29 /* Reference param save area */ |
1087 | #endif |
1088 | |
1089 | ldil L%intr_check_sig, %r2 |
1090 | copy %r25, %r16 /* save pt_regs */ |
1091 | |
1092 | b handle_interruption |
1093 | ldo R%intr_check_sig(%r2), %r2 |
1094 | ENDPROC_CFI(intr_save) |
1095 | |
1096 | |
1097 | /* |
1098 | * Note for all tlb miss handlers: |
1099 | * |
1100 | * cr24 contains a pointer to the kernel address space |
1101 | * page directory. |
1102 | * |
1103 | * cr25 contains a pointer to the current user address |
1104 | * space page directory. |
1105 | * |
1106 | * sr3 will contain the space id of the user address space |
1107 | * of the current running thread while that thread is |
1108 | * running in the kernel. |
1109 | */ |
1110 | |
1111 | /* |
1112 | * register number allocations. Note that these are all |
1113 | * in the shadowed registers |
1114 | */ |
1115 | |
1116 | t0 = r1 /* temporary register 0 */ |
1117 | va = r8 /* virtual address for which the trap occurred */ |
1118 | t1 = r9 /* temporary register 1 */ |
1119 | pte = r16 /* pte/phys page # */ |
1120 | prot = r17 /* prot bits */ |
1121 | spc = r24 /* space for which the trap occurred */ |
1122 | ptp = r25 /* page directory/page table pointer */ |
1123 | |
1124 | #ifdef CONFIG_64BIT |
1125 | |
1126 | dtlb_miss_20w: |
1127 | space_adjust spc,va,t0 |
1128 | get_pgd spc,ptp |
1129 | space_check spc,t0,dtlb_fault |
1130 | |
1131 | L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w |
1132 | |
1133 | ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w |
1134 | update_accessed ptp,pte,t0,t1 |
1135 | |
1136 | make_insert_tlb spc,pte,prot,t1 |
1137 | |
1138 | idtlbt pte,prot |
1139 | |
1140 | ptl_unlock spc,t0,t1 |
1141 | rfir |
1142 | nop |
1143 | |
1144 | dtlb_check_alias_20w: |
1145 | do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 |
1146 | |
1147 | idtlbt pte,prot |
1148 | |
1149 | insert_nops NUM_PIPELINE_INSNS - 1 |
1150 | rfir |
1151 | nop |
1152 | |
1153 | nadtlb_miss_20w: |
1154 | space_adjust spc,va,t0 |
1155 | get_pgd spc,ptp |
1156 | space_check spc,t0,nadtlb_fault |
1157 | |
1158 | L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w |
1159 | |
1160 | ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w |
1161 | update_accessed ptp,pte,t0,t1 |
1162 | |
1163 | make_insert_tlb spc,pte,prot,t1 |
1164 | |
1165 | idtlbt pte,prot |
1166 | |
1167 | ptl_unlock spc,t0,t1 |
1168 | rfir |
1169 | nop |
1170 | |
1171 | nadtlb_check_alias_20w: |
1172 | do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 |
1173 | |
1174 | idtlbt pte,prot |
1175 | |
1176 | insert_nops NUM_PIPELINE_INSNS - 1 |
1177 | rfir |
1178 | nop |
1179 | |
1180 | #else |
1181 | |
1182 | dtlb_miss_11: |
1183 | get_pgd spc,ptp |
1184 | |
1185 | space_check spc,t0,dtlb_fault |
1186 | |
1187 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 |
1188 | |
1189 | ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11 |
1190 | update_accessed ptp,pte,t0,t1 |
1191 | |
1192 | make_insert_tlb_11 spc,pte,prot |
1193 | |
1194 | mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ |
1195 | mtsp spc,%sr1 |
1196 | |
1197 | idtlba pte,(%sr1,va) |
1198 | idtlbp prot,(%sr1,va) |
1199 | |
1200 | mtsp t1, %sr1 /* Restore sr1 */ |
1201 | |
1202 | ptl_unlock spc,t0,t1 |
1203 | rfir |
1204 | nop |
1205 | |
1206 | dtlb_check_alias_11: |
1207 | do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11 |
1208 | |
1209 | idtlba pte,(va) |
1210 | idtlbp prot,(va) |
1211 | |
1212 | insert_nops NUM_PIPELINE_INSNS - 1 |
1213 | rfir |
1214 | nop |
1215 | |
1216 | nadtlb_miss_11: |
1217 | get_pgd spc,ptp |
1218 | |
1219 | space_check spc,t0,nadtlb_fault |
1220 | |
1221 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 |
1222 | |
1223 | ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11 |
1224 | update_accessed ptp,pte,t0,t1 |
1225 | |
1226 | make_insert_tlb_11 spc,pte,prot |
1227 | |
1228 | mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ |
1229 | mtsp spc,%sr1 |
1230 | |
1231 | idtlba pte,(%sr1,va) |
1232 | idtlbp prot,(%sr1,va) |
1233 | |
1234 | mtsp t1, %sr1 /* Restore sr1 */ |
1235 | |
1236 | ptl_unlock spc,t0,t1 |
1237 | rfir |
1238 | nop |
1239 | |
1240 | nadtlb_check_alias_11: |
1241 | do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11 |
1242 | |
1243 | idtlba pte,(va) |
1244 | idtlbp prot,(va) |
1245 | |
1246 | insert_nops NUM_PIPELINE_INSNS - 1 |
1247 | rfir |
1248 | nop |
1249 | |
1250 | dtlb_miss_20: |
1251 | space_adjust spc,va,t0 |
1252 | get_pgd spc,ptp |
1253 | space_check spc,t0,dtlb_fault |
1254 | |
1255 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 |
1256 | |
1257 | ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 |
1258 | update_accessed ptp,pte,t0,t1 |
1259 | |
1260 | make_insert_tlb spc,pte,prot,t1 |
1261 | |
1262 | f_extend pte,t1 |
1263 | |
1264 | idtlbt pte,prot |
1265 | |
1266 | ptl_unlock spc,t0,t1 |
1267 | rfir |
1268 | nop |
1269 | |
1270 | dtlb_check_alias_20: |
1271 | do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 |
1272 | |
1273 | idtlbt pte,prot |
1274 | |
1275 | insert_nops NUM_PIPELINE_INSNS - 1 |
1276 | rfir |
1277 | nop |
1278 | |
1279 | nadtlb_miss_20: |
1280 | get_pgd spc,ptp |
1281 | |
1282 | space_check spc,t0,nadtlb_fault |
1283 | |
1284 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 |
1285 | |
1286 | ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 |
1287 | update_accessed ptp,pte,t0,t1 |
1288 | |
1289 | make_insert_tlb spc,pte,prot,t1 |
1290 | |
1291 | f_extend pte,t1 |
1292 | |
1293 | idtlbt pte,prot |
1294 | |
1295 | ptl_unlock spc,t0,t1 |
1296 | rfir |
1297 | nop |
1298 | |
1299 | nadtlb_check_alias_20: |
1300 | do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 |
1301 | |
1302 | idtlbt pte,prot |
1303 | |
1304 | insert_nops NUM_PIPELINE_INSNS - 1 |
1305 | rfir |
1306 | nop |
1307 | |
1308 | #endif |
1309 | |
1310 | nadtlb_emulate: |
1311 | |
1312 | /* |
1313 |  * Non-access misses can be caused by fdc, fic, pdc, lpa, probe and |
1314 | * probei instructions. The kernel no longer faults doing flushes. |
1315 | * Use of lpa and probe instructions is rare. Given the issue |
1316 | * with shadow registers, we defer everything to the "slow" path. |
1317 | */ |
1318 | b,n nadtlb_fault |
1319 | |
1320 | #ifdef CONFIG_64BIT |
1321 | itlb_miss_20w: |
1322 | |
1323 | /* |
1324 |  * An instruction TLB miss is a little different, since we allow users |
1325 |  * to fault on the gateway page, which is in the kernel address space. |
1326 | */ |
1327 | |
1328 | space_adjust spc,va,t0 |
1329 | get_pgd spc,ptp |
1330 | space_check spc,t0,itlb_fault |
1331 | |
1332 | L3_ptep ptp,pte,t0,va,itlb_fault |
1333 | |
1334 | ptl_lock spc,ptp,pte,t0,t1,itlb_fault |
1335 | update_accessed ptp,pte,t0,t1 |
1336 | |
1337 | make_insert_tlb spc,pte,prot,t1 |
1338 | |
1339 | iitlbt pte,prot |
1340 | |
1341 | ptl_unlock spc,t0,t1 |
1342 | rfir |
1343 | nop |
1344 | |
1345 | naitlb_miss_20w: |
1346 | |
1347 | /* |
1348 |  * An instruction TLB miss is a little different, since we allow users |
1349 |  * to fault on the gateway page, which is in the kernel address space. |
1350 | */ |
1351 | |
1352 | space_adjust spc,va,t0 |
1353 | get_pgd spc,ptp |
1354 | space_check spc,t0,naitlb_fault |
1355 | |
1356 | L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w |
1357 | |
1358 | ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w |
1359 | update_accessed ptp,pte,t0,t1 |
1360 | |
1361 | make_insert_tlb spc,pte,prot,t1 |
1362 | |
1363 | iitlbt pte,prot |
1364 | |
1365 | ptl_unlock spc,t0,t1 |
1366 | rfir |
1367 | nop |
1368 | |
1369 | naitlb_check_alias_20w: |
1370 | do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 |
1371 | |
1372 | iitlbt pte,prot |
1373 | |
1374 | insert_nops NUM_PIPELINE_INSNS - 1 |
1375 | rfir |
1376 | nop |
1377 | |
1378 | #else |
1379 | |
1380 | itlb_miss_11: |
1381 | get_pgd spc,ptp |
1382 | |
1383 | space_check spc,t0,itlb_fault |
1384 | |
1385 | L2_ptep ptp,pte,t0,va,itlb_fault |
1386 | |
1387 | ptl_lock spc,ptp,pte,t0,t1,itlb_fault |
1388 | update_accessed ptp,pte,t0,t1 |
1389 | |
1390 | make_insert_tlb_11 spc,pte,prot |
1391 | |
1392 | mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ |
1393 | mtsp spc,%sr1 |
1394 | |
1395 | iitlba pte,(%sr1,va) |
1396 | iitlbp prot,(%sr1,va) |
1397 | |
1398 | mtsp t1, %sr1 /* Restore sr1 */ |
1399 | |
1400 | ptl_unlock spc,t0,t1 |
1401 | rfir |
1402 | nop |
1403 | |
1404 | naitlb_miss_11: |
1405 | get_pgd spc,ptp |
1406 | |
1407 | space_check spc,t0,naitlb_fault |
1408 | |
1409 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 |
1410 | |
1411 | ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11 |
1412 | update_accessed ptp,pte,t0,t1 |
1413 | |
1414 | make_insert_tlb_11 spc,pte,prot |
1415 | |
1416 | mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ |
1417 | mtsp spc,%sr1 |
1418 | |
1419 | iitlba pte,(%sr1,va) |
1420 | iitlbp prot,(%sr1,va) |
1421 | |
1422 | mtsp t1, %sr1 /* Restore sr1 */ |
1423 | |
1424 | ptl_unlock spc,t0,t1 |
1425 | rfir |
1426 | nop |
1427 | |
1428 | naitlb_check_alias_11: |
1429 | do_alias spc,t0,t1,va,pte,prot,itlb_fault,11 |
1430 | |
1431 | iitlba pte,(%sr0, va) |
1432 | iitlbp prot,(%sr0, va) |
1433 | |
1434 | insert_nops NUM_PIPELINE_INSNS - 1 |
1435 | rfir |
1436 | nop |
1437 | |
1438 | |
1439 | itlb_miss_20: |
1440 | get_pgd spc,ptp |
1441 | |
1442 | space_check spc,t0,itlb_fault |
1443 | |
1444 | L2_ptep ptp,pte,t0,va,itlb_fault |
1445 | |
1446 | ptl_lock spc,ptp,pte,t0,t1,itlb_fault |
1447 | update_accessed ptp,pte,t0,t1 |
1448 | |
1449 | make_insert_tlb spc,pte,prot,t1 |
1450 | |
1451 | f_extend pte,t1 |
1452 | |
1453 | iitlbt pte,prot |
1454 | |
1455 | ptl_unlock spc,t0,t1 |
1456 | rfir |
1457 | nop |
1458 | |
1459 | naitlb_miss_20: |
1460 | get_pgd spc,ptp |
1461 | |
1462 | space_check spc,t0,naitlb_fault |
1463 | |
1464 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 |
1465 | |
1466 | ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 |
1467 | update_accessed ptp,pte,t0,t1 |
1468 | |
1469 | make_insert_tlb spc,pte,prot,t1 |
1470 | |
1471 | f_extend pte,t1 |
1472 | |
1473 | iitlbt pte,prot |
1474 | |
1475 | ptl_unlock spc,t0,t1 |
1476 | rfir |
1477 | nop |
1478 | |
1479 | naitlb_check_alias_20: |
1480 | do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 |
1481 | |
1482 | iitlbt pte,prot |
1483 | |
1484 | insert_nops NUM_PIPELINE_INSNS - 1 |
1485 | rfir |
1486 | nop |
1487 | |
1488 | #endif |
1489 | |
1490 | #ifdef CONFIG_64BIT |
1491 | |
1492 | dbit_trap_20w: |
1493 | space_adjust spc,va,t0 |
1494 | get_pgd spc,ptp |
1495 | space_check spc,t0,dbit_fault |
1496 | |
1497 | L3_ptep ptp,pte,t0,va,dbit_fault |
1498 | |
1499 | ptl_lock spc,ptp,pte,t0,t1,dbit_fault |
1500 | update_dirty ptp,pte,t1 |
1501 | |
1502 | make_insert_tlb spc,pte,prot,t1 |
1503 | |
1504 | idtlbt pte,prot |
1505 | |
1506 | ptl_unlock spc,t0,t1 |
1507 | rfir |
1508 | nop |
1509 | #else |
1510 | |
1511 | dbit_trap_11: |
1512 | |
1513 | get_pgd spc,ptp |
1514 | |
1515 | space_check spc,t0,dbit_fault |
1516 | |
1517 | L2_ptep ptp,pte,t0,va,dbit_fault |
1518 | |
1519 | ptl_lock spc,ptp,pte,t0,t1,dbit_fault |
1520 | update_dirty ptp,pte,t1 |
1521 | |
1522 | make_insert_tlb_11 spc,pte,prot |
1523 | |
1524 | mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ |
1525 | mtsp spc,%sr1 |
1526 | |
1527 | idtlba pte,(%sr1,va) |
1528 | idtlbp prot,(%sr1,va) |
1529 | |
1530 | mtsp t1, %sr1 /* Restore sr1 */ |
1531 | |
1532 | ptl_unlock spc,t0,t1 |
1533 | rfir |
1534 | nop |
1535 | |
1536 | dbit_trap_20: |
1537 | get_pgd spc,ptp |
1538 | |
1539 | space_check spc,t0,dbit_fault |
1540 | |
1541 | L2_ptep ptp,pte,t0,va,dbit_fault |
1542 | |
1543 | ptl_lock spc,ptp,pte,t0,t1,dbit_fault |
1544 | update_dirty ptp,pte,t1 |
1545 | |
1546 | make_insert_tlb spc,pte,prot,t1 |
1547 | |
1548 | f_extend pte,t1 |
1549 | |
1550 | idtlbt pte,prot |
1551 | |
1552 | ptl_unlock spc,t0,t1 |
1553 | rfir |
1554 | nop |
1555 | #endif |
1556 | |
1557 | .import handle_interruption,code |
1558 | |
1559 | kernel_bad_space: |
1560 | b intr_save |
1561 | ldi 31,%r8 /* Use an unused code */ |
1562 | |
1563 | dbit_fault: |
1564 | b intr_save |
1565 | ldi 20,%r8 |
1566 | |
1567 | itlb_fault: |
1568 | b intr_save |
1569 | ldi PARISC_ITLB_TRAP,%r8 |
1570 | |
1571 | nadtlb_fault: |
1572 | b intr_save |
1573 | ldi 17,%r8 |
1574 | |
1575 | naitlb_fault: |
1576 | b intr_save |
1577 | ldi 16,%r8 |
1578 | |
1579 | dtlb_fault: |
1580 | b intr_save |
1581 | ldi 15,%r8 |
1582 | |
1583 | /* Register saving semantics for system calls: |
1584 | |
1585 | %r1 clobbered by system call macro in userspace |
1586 | %r2 saved in PT_REGS by gateway page |
1587 | %r3 - %r18 preserved by C code (saved by signal code) |
1588 | %r19 - %r20 saved in PT_REGS by gateway page |
1589 | %r21 - %r22 non-standard syscall args |
1590 | stored in kernel stack by gateway page |
1591 | %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page |
1592 | %r27 - %r30 saved in PT_REGS by gateway page |
1593 | %r31 syscall return pointer |
1594 | */ |
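 | /* On return to user space, %r28 carries the syscall return value (see |
 |  * syscall_exit below, where it is saved to TASK_PT_GR28). */ |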
1595 | |
1596 | /* Floating point registers (FIXME: what do we do with these?) |
1597 | |
1598 | %fr0 - %fr3 status/exception, not preserved |
1599 | %fr4 - %fr7 arguments |
1600 | %fr8 - %fr11 not preserved by C code |
1601 | %fr12 - %fr21 preserved by C code |
1602 | %fr22 - %fr31 not preserved by C code |
1603 | */ |
1604 | |
1605 | .macro reg_save regs |
1606 | STREG %r3, PT_GR3(\regs) |
1607 | STREG %r4, PT_GR4(\regs) |
1608 | STREG %r5, PT_GR5(\regs) |
1609 | STREG %r6, PT_GR6(\regs) |
1610 | STREG %r7, PT_GR7(\regs) |
1611 | STREG %r8, PT_GR8(\regs) |
1612 | STREG %r9, PT_GR9(\regs) |
1613 | STREG %r10,PT_GR10(\regs) |
1614 | STREG %r11,PT_GR11(\regs) |
1615 | STREG %r12,PT_GR12(\regs) |
1616 | STREG %r13,PT_GR13(\regs) |
1617 | STREG %r14,PT_GR14(\regs) |
1618 | STREG %r15,PT_GR15(\regs) |
1619 | STREG %r16,PT_GR16(\regs) |
1620 | STREG %r17,PT_GR17(\regs) |
1621 | STREG %r18,PT_GR18(\regs) |
1622 | .endm |
1623 | |
1624 | .macro reg_restore regs |
1625 | LDREG PT_GR3(\regs), %r3 |
1626 | LDREG PT_GR4(\regs), %r4 |
1627 | LDREG PT_GR5(\regs), %r5 |
1628 | LDREG PT_GR6(\regs), %r6 |
1629 | LDREG PT_GR7(\regs), %r7 |
1630 | LDREG PT_GR8(\regs), %r8 |
1631 | LDREG PT_GR9(\regs), %r9 |
1632 | LDREG PT_GR10(\regs),%r10 |
1633 | LDREG PT_GR11(\regs),%r11 |
1634 | LDREG PT_GR12(\regs),%r12 |
1635 | LDREG PT_GR13(\regs),%r13 |
1636 | LDREG PT_GR14(\regs),%r14 |
1637 | LDREG PT_GR15(\regs),%r15 |
1638 | LDREG PT_GR16(\regs),%r16 |
1639 | LDREG PT_GR17(\regs),%r17 |
1640 | LDREG PT_GR18(\regs),%r18 |
1641 | .endm |
1642 | |
1643 | .macro fork_like name |
1644 | ENTRY_CFI(sys_\name\()_wrapper) |
1645 | mfctl %cr30,%r1 |
1646 | ldo TASK_REGS(%r1),%r1 |
1647 | reg_save %r1 |
1648 | mfctl %cr27, %r28 |
1649 | ldil L%sys_\name, %r31 |
1650 | be R%sys_\name(%sr4,%r31) |
1651 | STREG %r28, PT_CR27(%r1) |
1652 | ENDPROC_CFI(sys_\name\()_wrapper) |
1653 | .endm |
1654 | |
1655 | fork_like clone |
1656 | fork_like clone3 |
1657 | fork_like fork |
1658 | fork_like vfork |
1659 | |
1660 | /* Set the return value for the child */ |
1661 | ENTRY(child_return) |
1662 | BL schedule_tail, %r2 |
1663 | nop |
1664 | finish_child_return: |
1665 | mfctl %cr30,%r1 |
1666 | ldo TASK_REGS(%r1),%r1 /* get pt regs */ |
1667 | |
1668 | LDREG PT_CR27(%r1), %r3 |
1669 | mtctl %r3, %cr27 |
1670 | reg_restore %r1 |
1671 | b syscall_exit |
1672 | copy %r0,%r28 |
1673 | END(child_return) |
1674 | |
1675 | ENTRY_CFI(sys_rt_sigreturn_wrapper) |
1676 | mfctl %cr30,%r26 |
1677 | ldo TASK_REGS(%r26),%r26 /* get pt regs */ |
1678 | /* Don't save regs, we are going to restore them from sigcontext. */ |
1679 | STREG %r2, -RP_OFFSET(%r30) |
1680 | #ifdef CONFIG_64BIT |
1681 | ldo FRAME_SIZE(%r30), %r30 |
1682 | BL sys_rt_sigreturn,%r2 |
1683 | ldo -16(%r30),%r29 /* Reference param save area */ |
1684 | #else |
1685 | BL sys_rt_sigreturn,%r2 |
1686 | ldo FRAME_SIZE(%r30), %r30 |
1687 | #endif |
1688 | |
1689 | ldo -FRAME_SIZE(%r30), %r30 |
1690 | LDREG -RP_OFFSET(%r30), %r2 |
1691 | |
1692 | /* FIXME: I think we need to restore a few more things here. */ |
1693 | mfctl %cr30,%r1 |
1694 | ldo TASK_REGS(%r1),%r1 /* get pt regs */ |
1695 | reg_restore %r1 |
1696 | |
1697 | /* If the signal was received while the process was blocked on a |
1698 | * syscall, then r2 will take us to syscall_exit; otherwise r2 will |
1699 | * take us to syscall_exit_rfi and on to intr_return. |
1700 | */ |
1701 | bv %r0(%r2) |
1702 | LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ |
1703 | ENDPROC_CFI(sys_rt_sigreturn_wrapper) |
1704 | |
1705 | ENTRY(syscall_exit) |
1706 | /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit |
1707 | * via syscall_exit_rfi if the signal was received while the process |
1708 | * was running. |
1709 | */ |
1710 | |
1711 | /* save return value now */ |
1712 | mfctl %cr30, %r1 |
1713 | STREG %r28,TASK_PT_GR28(%r1) |
1714 | |
1715 | /* Seems to me that dp could be wrong here, if the syscall involved |
1716 | * calling a module, and nothing got round to restoring dp on return. |
1717 | */ |
1718 | loadgp |
1719 | |
1720 | syscall_check_resched: |
1721 | |
1722 | /* check for reschedule */ |
1723 | mfctl %cr30,%r19 |
1724 | LDREG TASK_TI_FLAGS(%r19),%r19 /* long */ |
1725 | bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */ |
1726 | |
1727 | .import do_signal,code |
1728 | syscall_check_sig: |
1729 | mfctl %cr30,%r19 |
1730 | LDREG TASK_TI_FLAGS(%r19),%r19 |
1731 | ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26 |
1732 | and,COND(<>) %r19, %r26, %r0 |
1733 | b,n syscall_restore /* skip past if we've nothing to do */ |
1734 | |
1735 | syscall_do_signal: |
1736 | /* Save callee-save registers (for sigcontext). |
1737 | * FIXME: After this point the process structure should be |
1738 | * consistent with all the relevant state of the process |
1739 | * before the syscall. We need to verify this. |
1740 | */ |
1741 | mfctl %cr30,%r1 |
1742 | ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */ |
1743 | reg_save %r26 |
1744 | |
1745 | #ifdef CONFIG_64BIT |
1746 | ldo -16(%r30),%r29 /* Reference param save area */ |
1747 | #endif |
1748 | |
1749 | BL do_notify_resume,%r2 |
1750 | ldi 1, %r25 /* long in_syscall = 1 */ |
1751 | |
1752 | mfctl %cr30,%r1 |
1753 | ldo TASK_REGS(%r1), %r20 /* reload pt_regs */ |
1754 | reg_restore %r20 |
1755 | |
1756 | b,n syscall_check_sig |
1757 | |
1758 | syscall_restore: |
1759 | mfctl %cr30,%r1 |
1760 | |
1761 | /* Are we being ptraced? */ |
1762 | LDREG TASK_TI_FLAGS(%r1),%r19 |
1763 | ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2 |
1764 | and,COND(=) %r19,%r2,%r0 |
1765 | b,n syscall_restore_rfi |
1766 | |
1767 | ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ |
1768 | rest_fp %r19 |
1769 | |
1770 | LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */ |
1771 | mtsar %r19 |
1772 | |
1773 | LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */ |
1774 | LDREG TASK_PT_GR19(%r1),%r19 |
1775 | LDREG TASK_PT_GR20(%r1),%r20 |
1776 | LDREG TASK_PT_GR21(%r1),%r21 |
1777 | LDREG TASK_PT_GR22(%r1),%r22 |
1778 | LDREG TASK_PT_GR23(%r1),%r23 |
1779 | LDREG TASK_PT_GR24(%r1),%r24 |
1780 | LDREG TASK_PT_GR25(%r1),%r25 |
1781 | LDREG TASK_PT_GR26(%r1),%r26 |
1782 | LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */ |
1783 | LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */ |
1784 | LDREG TASK_PT_GR29(%r1),%r29 |
1785 | LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ |
1786 | |
1787 | /* NOTE: We use rsm/ssm pair to make this operation atomic */ |
1788 | LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */ |
1789 | rsm PSW_SM_I, %r0 |
1790 | copy %r1,%r30 /* Restore user sp */ |
1791 | mfsp %sr3,%r1 /* Get user space id */ |
1792 | mtsp %r1,%sr7 /* Restore sr7 */ |
1793 | ssm PSW_SM_I, %r0 |
1794 | |
1795 | /* Set sr2 to zero for userspace syscalls to work. */ |
1796 | mtsp %r0,%sr2 |
1797 | mtsp %r1,%sr4 /* Restore sr4 */ |
1798 | mtsp %r1,%sr5 /* Restore sr5 */ |
1799 | mtsp %r1,%sr6 /* Restore sr6 */ |
1800 | |
1801 | depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */ |
1802 | |
1803 | #ifdef CONFIG_64BIT |
1804 | /* decide whether to reset the wide mode bit |
1805 | * |
1806 | * For a syscall, the W bit is stored in the lowest bit |
1807 | * of sp. Extract it and reset W if it is zero */ |
1808 | extrd,u,*<> %r30,63,1,%r1 |
1809 | rsm PSW_SM_W, %r0 |
1810 | /* now reset the lowest bit of sp if it was set */ |
1811 | xor %r30,%r1,%r30 |
1812 | #endif |
1813 | be,n 0(%sr3,%r31) /* return to user space */ |
1814 | |
1815 | /* We have to return via an RFI, so that PSW T and R bits can be set |
1816 | * appropriately. |
1817 | * This sets up pt_regs so we can return via intr_restore, which is not |
1818 | * the most efficient way of doing things, but it works. |
1819 | */ |
1820 | syscall_restore_rfi: |
1821 | ldo -1(%r0),%r2 /* Set recovery cntr to -1 */ |
1822 | mtctl %r2,%cr0 /* for immediate trap */ |
1823 | LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */ |
1824 | ldi 0x0b,%r20 /* Create new PSW */ |
1825 | depi -1,13,1,%r20 /* C, Q, D, and I bits */ |
1826 | |
1827 | /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are |
1828 | * set in thread_info.h and converted to PA bitmap |
1829 | * numbers in asm-offsets.c */ |
1830 | |
1831 | /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ |
1832 | extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 |
1833 | depi -1,27,1,%r20 /* R bit */ |
1834 | |
1835 | /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ |
1836 | extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 |
1837 | depi -1,7,1,%r20 /* T bit */ |
1838 | |
1839 | STREG %r20,TASK_PT_PSW(%r1) |
1840 | |
1841 | /* Always store space registers, since sr3 can be changed (e.g. fork) */ |
1842 | |
1843 | mfsp %sr3,%r25 |
1844 | STREG %r25,TASK_PT_SR3(%r1) |
1845 | STREG %r25,TASK_PT_SR4(%r1) |
1846 | STREG %r25,TASK_PT_SR5(%r1) |
1847 | STREG %r25,TASK_PT_SR6(%r1) |
1848 | STREG %r25,TASK_PT_SR7(%r1) |
1849 | STREG %r25,TASK_PT_IASQ0(%r1) |
1850 | STREG %r25,TASK_PT_IASQ1(%r1) |
1851 | |
1852 | /* XXX W bit??? */ |
1853 | /* Now if old D bit is clear, it means we didn't save all registers |
1854 | * on syscall entry, so do that now. This only happens on TRACEME |
1855 | * calls, or if someone attached to us while we were on a syscall. |
1856 | * We could make this more efficient by not saving r3-r18, but |
1857 | * then we wouldn't be able to use the common intr_restore path. |
1858 | * It is only for traced processes anyway, so performance is not |
1859 | * an issue. |
1860 | */ |
1861 | bb,< %r2,30,pt_regs_ok /* Branch if D set */ |
1862 | ldo TASK_REGS(%r1),%r25 |
1863 | reg_save %r25 /* Save r3 to r18 */ |
1864 | |
1865 | /* Save the current sr */ |
1866 | mfsp %sr0,%r2 |
1867 | STREG %r2,TASK_PT_SR0(%r1) |
1868 | |
1869 | /* Save the scratch sr */ |
1870 | mfsp %sr1,%r2 |
1871 | STREG %r2,TASK_PT_SR1(%r1) |
1872 | |
1873 | /* sr2 should be set to zero for userspace syscalls */ |
1874 | STREG %r0,TASK_PT_SR2(%r1) |
1875 | |
1876 | LDREG TASK_PT_GR31(%r1),%r2 |
1877 | depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ |
1878 | STREG %r2,TASK_PT_IAOQ0(%r1) |
1879 | ldo 4(%r2),%r2 |
1880 | STREG %r2,TASK_PT_IAOQ1(%r1) |
1881 | b intr_restore |
1882 | copy %r25,%r16 |
1883 | |
1884 | pt_regs_ok: |
1885 | LDREG TASK_PT_IAOQ0(%r1),%r2 |
1886 | depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */ |
1887 | STREG %r2,TASK_PT_IAOQ0(%r1) |
1888 | LDREG TASK_PT_IAOQ1(%r1),%r2 |
1889 | depi PRIV_USER,31,2,%r2 |
1890 | STREG %r2,TASK_PT_IAOQ1(%r1) |
1891 | b intr_restore |
1892 | copy %r25,%r16 |
1893 | |
1894 | syscall_do_resched: |
1895 | load32 syscall_check_resched,%r2 /* if resched, we start over again */ |
1896 | load32 schedule,%r19 |
1897 | bv %r0(%r19) /* jumps to schedule() */ |
1898 | #ifdef CONFIG_64BIT |
1899 | ldo -16(%r30),%r29 /* Reference param save area */ |
1900 | #else |
1901 | nop |
1902 | #endif |
1903 | END(syscall_exit) |
1904 | |
1905 | |
1906 | #ifdef CONFIG_FUNCTION_TRACER |
1907 | |
1908 | .import ftrace_function_trampoline,code |
1909 | .align L1_CACHE_BYTES |
1910 | ENTRY_CFI(mcount, caller) |
1911 | _mcount: |
1912 | .export _mcount,data |
1913 | /* |
1914 | * The 64bit mcount() function pointer needs 4 dwords, of which the |
1915 | * first two are free. We optimize it here and put 2 instructions for |
1916 | * calling mcount(), and 2 instructions for ftrace_stub(). That way we |
1917 | * have all on one L1 cacheline. |
1918 | */ |
1919 | ldi 0, %arg3 |
1920 | b ftrace_function_trampoline |
1921 | copy %r3, %arg2 /* caller original %sp */ |
1922 | ftrace_stub: |
1923 | .globl ftrace_stub |
1924 | .type ftrace_stub, @function |
1925 | #ifdef CONFIG_64BIT |
1926 | bve (%rp) |
1927 | #else |
1928 | bv %r0(%rp) |
1929 | #endif |
1930 | nop |
1931 | #ifdef CONFIG_64BIT |
1932 | .dword mcount |
1933 | .dword 0 /* code in head.S puts value of global gp here */ |
1934 | #endif |
1935 | ENDPROC_CFI(mcount) |
1936 | |
1937 | #ifdef CONFIG_DYNAMIC_FTRACE |
1938 | |
1939 | #ifdef CONFIG_64BIT |
1940 | #define FTRACE_FRAME_SIZE (2*FRAME_SIZE) |
1941 | #else |
1942 | #define FTRACE_FRAME_SIZE FRAME_SIZE |
1943 | #endif |
1944 | ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) |
1945 | ftrace_caller: |
1946 | .global ftrace_caller |
1947 | |
1948 | STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp) |
1949 | ldo -FTRACE_FRAME_SIZE(%sp), %r3 |
1950 | STREG %rp, -RP_OFFSET(%r3) |
1951 | |
1952 | /* Offset 0 is already allocated for %r1 */ |
1953 | STREG %r23, 2*REG_SZ(%r3) |
1954 | STREG %r24, 3*REG_SZ(%r3) |
1955 | STREG %r25, 4*REG_SZ(%r3) |
1956 | STREG %r26, 5*REG_SZ(%r3) |
1957 | STREG %r28, 6*REG_SZ(%r3) |
1958 | STREG %r29, 7*REG_SZ(%r3) |
1959 | #ifdef CONFIG_64BIT |
1960 | STREG %r19, 8*REG_SZ(%r3) |
1961 | STREG %r20, 9*REG_SZ(%r3) |
1962 | STREG %r21, 10*REG_SZ(%r3) |
1963 | STREG %r22, 11*REG_SZ(%r3) |
1964 | STREG %r27, 12*REG_SZ(%r3) |
1965 | STREG %r31, 13*REG_SZ(%r3) |
1966 | loadgp |
1967 | ldo -16(%sp),%r29 |
1968 | #endif |
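	/* Argument set-up for ftrace_function_trampoline(): %r26 is the
	 * caller's return address, %r25 the call site inside the traced
	 * function (the saved %r1, minus 8), %r24 the ftrace frame pointer
	 * (%r3) and %r23 NULL because no pt_regs were saved. */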
1969 | LDREG 0(%r3), %r25 |
1970 | copy %rp, %r26 |
1971 | ldo -8(%r25), %r25 |
1972 | ldi 0, %r23 /* no pt_regs */ |
1973 | b,l ftrace_function_trampoline, %rp |
1974 | copy %r3, %r24 |
1975 | |
1976 | LDREG -RP_OFFSET(%r3), %rp |
1977 | LDREG 2*REG_SZ(%r3), %r23 |
1978 | LDREG 3*REG_SZ(%r3), %r24 |
1979 | LDREG 4*REG_SZ(%r3), %r25 |
1980 | LDREG 5*REG_SZ(%r3), %r26 |
1981 | LDREG 6*REG_SZ(%r3), %r28 |
1982 | LDREG 7*REG_SZ(%r3), %r29 |
1983 | #ifdef CONFIG_64BIT |
1984 | LDREG 8*REG_SZ(%r3), %r19 |
1985 | LDREG 9*REG_SZ(%r3), %r20 |
1986 | LDREG 10*REG_SZ(%r3), %r21 |
1987 | LDREG 11*REG_SZ(%r3), %r22 |
1988 | LDREG 12*REG_SZ(%r3), %r27 |
1989 | LDREG 13*REG_SZ(%r3), %r31 |
1990 | #endif |
1991 | LDREG 1*REG_SZ(%r3), %r3 |
1992 | |
1993 | LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 |
	/* Adjust the return point back to the start of the traced function */
1995 | ldo -4(%r1), %r1 |
1996 | bv,n (%r1) |
1997 | |
1998 | ENDPROC_CFI(ftrace_caller) |
1999 | |
2000 | #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS |
2001 | ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN, |
2002 | CALLS,SAVE_RP,SAVE_SP) |
2003 | ftrace_regs_caller: |
2004 | .global ftrace_regs_caller |
2005 | |
2006 | ldo -FTRACE_FRAME_SIZE(%sp), %r1 |
2007 | STREG %rp, -RP_OFFSET(%r1) |
2008 | |
2009 | copy %sp, %r1 |
2010 | ldo PT_SZ_ALGN(%sp), %sp |
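
	/* %r1 points at a pt_regs sized area carved out at the old top of
	 * stack; save the complete general register state there so the
	 * tracer can inspect and modify it through ftrace_regs. */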
2011 | |
2012 | STREG %rp, PT_GR2(%r1) |
2013 | STREG %r3, PT_GR3(%r1) |
2014 | STREG %r4, PT_GR4(%r1) |
2015 | STREG %r5, PT_GR5(%r1) |
2016 | STREG %r6, PT_GR6(%r1) |
2017 | STREG %r7, PT_GR7(%r1) |
2018 | STREG %r8, PT_GR8(%r1) |
2019 | STREG %r9, PT_GR9(%r1) |
2020 | STREG %r10, PT_GR10(%r1) |
2021 | STREG %r11, PT_GR11(%r1) |
2022 | STREG %r12, PT_GR12(%r1) |
2023 | STREG %r13, PT_GR13(%r1) |
2024 | STREG %r14, PT_GR14(%r1) |
2025 | STREG %r15, PT_GR15(%r1) |
2026 | STREG %r16, PT_GR16(%r1) |
2027 | STREG %r17, PT_GR17(%r1) |
2028 | STREG %r18, PT_GR18(%r1) |
2029 | STREG %r19, PT_GR19(%r1) |
2030 | STREG %r20, PT_GR20(%r1) |
2031 | STREG %r21, PT_GR21(%r1) |
2032 | STREG %r22, PT_GR22(%r1) |
2033 | STREG %r23, PT_GR23(%r1) |
2034 | STREG %r24, PT_GR24(%r1) |
2035 | STREG %r25, PT_GR25(%r1) |
2036 | STREG %r26, PT_GR26(%r1) |
2037 | STREG %r27, PT_GR27(%r1) |
2038 | STREG %r28, PT_GR28(%r1) |
2039 | STREG %r29, PT_GR29(%r1) |
2040 | STREG %r30, PT_GR30(%r1) |
2041 | STREG %r31, PT_GR31(%r1) |
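	/* %cr11 is the shift amount register (SAR); like the GRs it has to
	 * survive the tracer call transparently. */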
2042 | mfctl %cr11, %r26 |
2043 | STREG %r26, PT_SAR(%r1) |
2044 | |
2045 | copy %rp, %r26 |
2046 | LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25 |
2047 | ldo -8(%r25), %r25 |
2048 | ldo -FTRACE_FRAME_SIZE(%r1), %arg2 |
2049 | b,l ftrace_function_trampoline, %rp |
2050 | copy %r1, %arg3 /* struct pt_regs */ |
2051 | |
2052 | ldo -PT_SZ_ALGN(%sp), %r1 |
2053 | |
2054 | LDREG PT_SAR(%r1), %rp |
2055 | mtctl %rp, %cr11 |
2056 | |
2057 | LDREG PT_GR2(%r1), %rp |
2058 | LDREG PT_GR3(%r1), %r3 |
2059 | LDREG PT_GR4(%r1), %r4 |
2060 | LDREG PT_GR5(%r1), %r5 |
2061 | LDREG PT_GR6(%r1), %r6 |
2062 | LDREG PT_GR7(%r1), %r7 |
2063 | LDREG PT_GR8(%r1), %r8 |
2064 | LDREG PT_GR9(%r1), %r9 |
2065 | LDREG PT_GR10(%r1),%r10 |
2066 | LDREG PT_GR11(%r1),%r11 |
2067 | LDREG PT_GR12(%r1),%r12 |
2068 | LDREG PT_GR13(%r1),%r13 |
2069 | LDREG PT_GR14(%r1),%r14 |
2070 | LDREG PT_GR15(%r1),%r15 |
2071 | LDREG PT_GR16(%r1),%r16 |
2072 | LDREG PT_GR17(%r1),%r17 |
2073 | LDREG PT_GR18(%r1),%r18 |
2074 | LDREG PT_GR19(%r1),%r19 |
2075 | LDREG PT_GR20(%r1),%r20 |
2076 | LDREG PT_GR21(%r1),%r21 |
2077 | LDREG PT_GR22(%r1),%r22 |
2078 | LDREG PT_GR23(%r1),%r23 |
2079 | LDREG PT_GR24(%r1),%r24 |
2080 | LDREG PT_GR25(%r1),%r25 |
2081 | LDREG PT_GR26(%r1),%r26 |
2082 | LDREG PT_GR27(%r1),%r27 |
2083 | LDREG PT_GR28(%r1),%r28 |
2084 | LDREG PT_GR29(%r1),%r29 |
2085 | LDREG PT_GR30(%r1),%r30 |
2086 | LDREG PT_GR31(%r1),%r31 |
2087 | |
2088 | ldo -PT_SZ_ALGN(%sp), %sp |
2089 | LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 |
	/* Adjust the return point back to the start of the traced function */
2091 | ldo -4(%r1), %r1 |
2092 | bv,n (%r1) |
2093 | |
2094 | ENDPROC_CFI(ftrace_regs_caller) |
2095 | |
2096 | #endif |
2097 | #endif |
2098 | |
2099 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
2100 | .align 8 |
2101 | ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) |
2102 | .export parisc_return_to_handler,data |
2103 | parisc_return_to_handler: |
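	/* ftrace_return_to_handler() hands back the original return
	 * address that the graph tracer replaced, so %rp is reloaded from
	 * its return value; the traced function's real return values in
	 * %ret0/%ret1 must be preserved across that call. */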
2104 | copy %r3,%r1 |
2105 | STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */ |
2106 | copy %sp,%r3 |
2107 | STREGM %r1,FRAME_SIZE(%sp) |
2108 | STREG %ret0,8(%r3) |
2109 | STREG %ret1,16(%r3) |
2110 | |
2111 | #ifdef CONFIG_64BIT |
2112 | loadgp |
2113 | #endif |
2114 | |
2115 | /* call ftrace_return_to_handler(0) */ |
2116 | .import ftrace_return_to_handler,code |
2117 | load32 ftrace_return_to_handler,%ret0 |
2118 | load32 .Lftrace_ret,%r2 |
2119 | #ifdef CONFIG_64BIT |
2120 | ldo -16(%sp),%ret1 /* Reference param save area */ |
2121 | bve (%ret0) |
2122 | #else |
2123 | bv %r0(%ret0) |
2124 | #endif |
2125 | ldi 0,%r26 |
2126 | .Lftrace_ret: |
2127 | copy %ret0,%rp |
2128 | |
2129 | /* restore original return values */ |
2130 | LDREG 8(%r3),%ret0 |
2131 | LDREG 16(%r3),%ret1 |
2132 | |
2133 | /* return from function */ |
2134 | #ifdef CONFIG_64BIT |
2135 | bve (%rp) |
2136 | #else |
2137 | bv %r0(%rp) |
2138 | #endif |
2139 | LDREGM -FRAME_SIZE(%sp),%r3 |
2140 | ENDPROC_CFI(return_to_handler) |
2141 | |
2142 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2143 | |
2144 | #endif /* CONFIG_FUNCTION_TRACER */ |
2145 | |
2146 | #ifdef CONFIG_IRQSTACKS |
2147 | /* void call_on_stack(unsigned long param1, void *func, |
2148 | unsigned long new_stack) */ |
2149 | ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) |
2150 | ENTRY(_call_on_stack) |
2151 | copy %sp, %r1 |
2152 | |
	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left pointing at the
	   argument region allocated for the call to call_on_stack. */
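
	/* In short: switch %sp to the new stack (two frames are reserved
	 * on it), stash the old %sp and %rp in the frame marker, call
	 * func(param1), then restore both to get back onto the original
	 * stack. */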
2157 | |
2158 | /* Switch to new stack. We allocate two frames. */ |
2159 | ldo 2*FRAME_SIZE(%arg2), %sp |
2160 | # ifdef CONFIG_64BIT |
2161 | /* Save previous stack pointer and return pointer in frame marker */ |
2162 | STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) |
2163 | /* Calls always use function descriptor */ |
2164 | LDREG 16(%arg1), %arg1 |
2165 | bve,l (%arg1), %rp |
2166 | STREG %r1, -FRAME_SIZE-REG_SZ(%sp) |
2167 | LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp |
2168 | bve (%rp) |
2169 | LDREG -FRAME_SIZE-REG_SZ(%sp), %sp |
2170 | # else |
2171 | /* Save previous stack pointer and return pointer in frame marker */ |
2172 | STREG %r1, -FRAME_SIZE-REG_SZ(%sp) |
2173 | STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp) |
2174 | /* Calls use function descriptor if PLABEL bit is set */ |
2175 | bb,>=,n %arg1, 30, 1f |
2176 | depwi 0,31,2, %arg1 |
2177 | LDREG 0(%arg1), %arg1 |
2178 | 1: |
2179 | be,l 0(%sr4,%arg1), %sr0, %r31 |
2180 | copy %r31, %rp |
2181 | LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp |
2182 | bv (%rp) |
2183 | LDREG -FRAME_SIZE-REG_SZ(%sp), %sp |
2184 | # endif /* CONFIG_64BIT */ |
2185 | ENDPROC_CFI(call_on_stack) |
2186 | #endif /* CONFIG_IRQSTACKS */ |
2187 | |
2188 | ENTRY_CFI(get_register) |
2189 | /* |
 * get_register is used by the non-access TLB miss handlers to
2191 | * copy the value of the general register specified in r8 into |
2192 | * r1. This routine can't be used for shadowed registers, since |
2193 | * the rfir will restore the original value. So, for the shadowed |
2194 | * registers we put a -1 into r1 to indicate that the register |
2195 | * should not be used (the register being copied could also have |
2196 | * a -1 in it, but that is OK, it just means that we will have |
2197 | * to use the slow path instead). |
2198 | */ |
2199 | blr %r8,%r0 |
2200 | nop |
2201 | bv %r0(%r25) /* r0 */ |
2202 | copy %r0,%r1 |
2203 | bv %r0(%r25) /* r1 - shadowed */ |
2204 | ldi -1,%r1 |
2205 | bv %r0(%r25) /* r2 */ |
2206 | copy %r2,%r1 |
2207 | bv %r0(%r25) /* r3 */ |
2208 | copy %r3,%r1 |
2209 | bv %r0(%r25) /* r4 */ |
2210 | copy %r4,%r1 |
2211 | bv %r0(%r25) /* r5 */ |
2212 | copy %r5,%r1 |
2213 | bv %r0(%r25) /* r6 */ |
2214 | copy %r6,%r1 |
2215 | bv %r0(%r25) /* r7 */ |
2216 | copy %r7,%r1 |
2217 | bv %r0(%r25) /* r8 - shadowed */ |
2218 | ldi -1,%r1 |
2219 | bv %r0(%r25) /* r9 - shadowed */ |
2220 | ldi -1,%r1 |
2221 | bv %r0(%r25) /* r10 */ |
2222 | copy %r10,%r1 |
2223 | bv %r0(%r25) /* r11 */ |
2224 | copy %r11,%r1 |
2225 | bv %r0(%r25) /* r12 */ |
2226 | copy %r12,%r1 |
2227 | bv %r0(%r25) /* r13 */ |
2228 | copy %r13,%r1 |
2229 | bv %r0(%r25) /* r14 */ |
2230 | copy %r14,%r1 |
2231 | bv %r0(%r25) /* r15 */ |
2232 | copy %r15,%r1 |
2233 | bv %r0(%r25) /* r16 - shadowed */ |
2234 | ldi -1,%r1 |
2235 | bv %r0(%r25) /* r17 - shadowed */ |
2236 | ldi -1,%r1 |
2237 | bv %r0(%r25) /* r18 */ |
2238 | copy %r18,%r1 |
2239 | bv %r0(%r25) /* r19 */ |
2240 | copy %r19,%r1 |
2241 | bv %r0(%r25) /* r20 */ |
2242 | copy %r20,%r1 |
2243 | bv %r0(%r25) /* r21 */ |
2244 | copy %r21,%r1 |
2245 | bv %r0(%r25) /* r22 */ |
2246 | copy %r22,%r1 |
2247 | bv %r0(%r25) /* r23 */ |
2248 | copy %r23,%r1 |
2249 | bv %r0(%r25) /* r24 - shadowed */ |
2250 | ldi -1,%r1 |
2251 | bv %r0(%r25) /* r25 - shadowed */ |
2252 | ldi -1,%r1 |
2253 | bv %r0(%r25) /* r26 */ |
2254 | copy %r26,%r1 |
2255 | bv %r0(%r25) /* r27 */ |
2256 | copy %r27,%r1 |
2257 | bv %r0(%r25) /* r28 */ |
2258 | copy %r28,%r1 |
2259 | bv %r0(%r25) /* r29 */ |
2260 | copy %r29,%r1 |
2261 | bv %r0(%r25) /* r30 */ |
2262 | copy %r30,%r1 |
2263 | bv %r0(%r25) /* r31 */ |
2264 | copy %r31,%r1 |
2265 | ENDPROC_CFI(get_register) |
2266 | |
2267 | |
2268 | ENTRY_CFI(set_register) |
2269 | /* |
 * set_register is used by the non-access TLB miss handlers to
2271 | * copy the value of r1 into the general register specified in |
2272 | * r8. |
2273 | */ |
2274 | blr %r8,%r0 |
2275 | nop |
	bv %r0(%r25)    /* r0 (silly, but it is a placeholder) */
2277 | copy %r1,%r0 |
2278 | bv %r0(%r25) /* r1 */ |
2279 | copy %r1,%r1 |
2280 | bv %r0(%r25) /* r2 */ |
2281 | copy %r1,%r2 |
2282 | bv %r0(%r25) /* r3 */ |
2283 | copy %r1,%r3 |
2284 | bv %r0(%r25) /* r4 */ |
2285 | copy %r1,%r4 |
2286 | bv %r0(%r25) /* r5 */ |
2287 | copy %r1,%r5 |
2288 | bv %r0(%r25) /* r6 */ |
2289 | copy %r1,%r6 |
2290 | bv %r0(%r25) /* r7 */ |
2291 | copy %r1,%r7 |
2292 | bv %r0(%r25) /* r8 */ |
2293 | copy %r1,%r8 |
2294 | bv %r0(%r25) /* r9 */ |
2295 | copy %r1,%r9 |
2296 | bv %r0(%r25) /* r10 */ |
2297 | copy %r1,%r10 |
2298 | bv %r0(%r25) /* r11 */ |
2299 | copy %r1,%r11 |
2300 | bv %r0(%r25) /* r12 */ |
2301 | copy %r1,%r12 |
2302 | bv %r0(%r25) /* r13 */ |
2303 | copy %r1,%r13 |
2304 | bv %r0(%r25) /* r14 */ |
2305 | copy %r1,%r14 |
2306 | bv %r0(%r25) /* r15 */ |
2307 | copy %r1,%r15 |
2308 | bv %r0(%r25) /* r16 */ |
2309 | copy %r1,%r16 |
2310 | bv %r0(%r25) /* r17 */ |
2311 | copy %r1,%r17 |
2312 | bv %r0(%r25) /* r18 */ |
2313 | copy %r1,%r18 |
2314 | bv %r0(%r25) /* r19 */ |
2315 | copy %r1,%r19 |
2316 | bv %r0(%r25) /* r20 */ |
2317 | copy %r1,%r20 |
2318 | bv %r0(%r25) /* r21 */ |
2319 | copy %r1,%r21 |
2320 | bv %r0(%r25) /* r22 */ |
2321 | copy %r1,%r22 |
2322 | bv %r0(%r25) /* r23 */ |
2323 | copy %r1,%r23 |
2324 | bv %r0(%r25) /* r24 */ |
2325 | copy %r1,%r24 |
2326 | bv %r0(%r25) /* r25 */ |
2327 | copy %r1,%r25 |
2328 | bv %r0(%r25) /* r26 */ |
2329 | copy %r1,%r26 |
2330 | bv %r0(%r25) /* r27 */ |
2331 | copy %r1,%r27 |
2332 | bv %r0(%r25) /* r28 */ |
2333 | copy %r1,%r28 |
2334 | bv %r0(%r25) /* r29 */ |
2335 | copy %r1,%r29 |
2336 | bv %r0(%r25) /* r30 */ |
2337 | copy %r1,%r30 |
2338 | bv %r0(%r25) /* r31 */ |
2339 | copy %r1,%r31 |
2340 | ENDPROC_CFI(set_register) |
2341 | |
2342 | |