/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
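	/*
	 * k1 now holds ExcCode scaled for the handler table: Cause bits
	 * [6:2] are ExcCode << 2, and the extra 64-bit shift doubles
	 * that to index 8-byte pointers. E.g. ExcCode 8 (Syscall) gives
	 * offset 32 on 32-bit kernels and 64 on 64-bit ones.
	 */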
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 bytes
 * (as a special exception) to fit into the space reserved for the
 * exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31: VCED
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2		# ExcCode 14: VCEI
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

/*
 * Big shit, we now may have two dirty primary cache lines for the same
 * physical address. We can safely invalidate the line pointed to by
 * c0_badvaddr because after return from this exception handler the
 * load / store will be re-executed.
 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
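	/*
	 * Index_Store_Tag_D with TagLo cleared invalidates the primary
	 * dcache line for the faulting address; Hit_Writeback_Inv_SD
	 * then writes back and invalidates the matching secondary line,
	 * so the re-executed access refills coherently.
	 */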
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)
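/*
 * If an interrupt is taken while EPC lies anywhere inside the 32-byte
 * region above, the BUILD_ROLLBACK_PROLOGUE below rewinds EPC to the
 * start of __r4k_wait, so the TIF_NEED_RESCHED test is redone before
 * the CPU goes back into wait.
 */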

	.macro	BUILD_ROLLBACK_PROLOGUE handler
FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
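	/*
	 * ori + xori with 0x1f clears the low five bits, rounding the
	 * saved EPC down to a 32-byte boundary; a match against
	 * &__r4k_wait means the interrupt hit inside the rollback
	 * region.
	 */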
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check whether the interrupted code has just disabled
	 * interrupts, and if so ignore this interrupt for now.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
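	/*
	 * The 64-bit sequence above splices the address of irq_stack
	 * together 16 bits at a time: %highest, %higher and %hi supply
	 * bits 63..48, 47..32 and 31..16, and %lo(irq_stack) is folded
	 * into the load displacement below.
	 */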
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
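	/*
	 * Masking sp with ~(_THREAD_SIZE-1) yields the base of the
	 * stack sp points into; irq_stack[cpu] records the IRQ stack
	 * base, so equality means we are already on it.
	 */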
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be
 * replaced at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b		/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
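/*
 * The vector setup code (set_vi_handler() in traps.c) patches the
 * immediates of the lui/ori pair above so that v0 carries the address
 * of the per-vector dispatch function into except_vec_vi_handler.
 */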

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	/*
	 * The saves above (SAVE_SOME/SAVE_AT in the vector plus
	 * SAVE_TEMP/SAVE_STATIC here add up to SAVE_ALL) ensure we are
	 * using a valid kernel stack for the thread. Check if we are
	 * already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
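	/*
	 * Hand-rolled ll/sc spinlock: only k0/k1 are usable this early,
	 * so the lock address is rematerialized around the ll. Spin
	 * while the lock word is nonzero, then store a nonzero value
	 * (the lock's own address); a failed sc restarts at 1b.
	 */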
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
/*
 * This buffer is reserved for the use of the EJTAG debug handler,
 * which stashes k1 here; on SMP each CPU then keeps its own copy,
 * serialized by ejtag_debug_buffer_spinlock.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Set EXL   - keep the CPU in kernel mode with interrupts
	 *             masked while ERL is cleared below
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm
/* Gas tries to parse the ASM_PRINT argument as a string containing
   string escapes and emits bogus warnings if it thinks it has found
   an unknown escape code. So make the argument start with an n and
   gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm
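/*
 * Example: "__BUILD_verbose ri" substitutes \nexception with "ri", so
 * the string assembles as "Got ri at %08lx\n" and the message printed
 * at exception time reads "Got ri at <epc>".
 */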

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
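/*
 * Example: "BUILD_HANDLER bp bp sti silent" emits handle_bp, which
 * saves all registers, re-enables interrupts via __build_clear_sti
 * and calls do_bp() before taking the common ret_from_exception path.
 */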

	BUILD_HANDLER adel ade ade silent		/* #4 */
	BUILD_HANDLER ades ade ade silent		/* #5 */
	BUILD_HANDLER ibe be cli silent			/* #6 */
	BUILD_HANDLER dbe be cli silent			/* #7 */
	BUILD_HANDLER bp bp sti silent			/* #9 */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/*
	 * Check if the TLB contains an entry for EPC. The SRL/SLL pair
	 * below strips the page offset and the even/odd page-select bit
	 * so the result lines up with EntryHi's VPN2 field.
	 */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
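	/*
	 * Bit 0 of EPC set means the faulting code is microMIPS: clear
	 * it to get the real address, fetch the instruction as two
	 * halfwords and merge them (first halfword into bits 31:16)
	 * before comparing against the encodings above.
	 */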
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr. No need to check CAUSE.BD here. */
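	/*
	 * Had rdhwr been in a branch delay slot, EPC would have pointed
	 * at the branch and the word fetched above could not have
	 * matched the rdhwr encoding, so advancing EPC by 4 is always
	 * correct.
	 */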
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif
682 | |