/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#include <asm/mach-loongson64/cpucfg-emul.h>

#include "access-helper.h"

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_gsexc(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
{
	board_be_handler = handler;
}
EXPORT_SYMBOL_GPL(mips_set_be_handler);
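
/*
 * Platform code typically installs its bus-error hook from its setup
 * code.  A minimal sketch (the handler name is made up for illustration):
 *
 *	static int my_be_handler(struct pt_regs *regs, int is_fixup)
 *	{
 *		return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 *	}
 *	...
 *	mips_set_be_handler(my_be_handler);
 *
 * The handler returns one of MIPS_BE_DISCARD, MIPS_BE_FIXUP or
 * MIPS_BE_FATAL; see do_be() below for how each is acted upon.
 */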

static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
			       bool user)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp, loglvl, user);
		return;
	}
	printk("%sCall Trace:\n", loglvl);
	do {
		print_ip_sym(loglvl, pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	int i;
	unsigned long *sp = (unsigned long *)regs->regs[29];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(void *pc, bool user)
{
	long i;
	unsigned short *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (u16 *)((unsigned long)pc & ~1);

	for (i = -3; i < 6; i++) {
		if (pc16) {
			u16 insn16;

			if (__get_inst16(&insn16, pc16 + i, user))
				goto bad_address;

			pr_cont("%c%04x%c", (i ? ' ' : '<'), insn16, (i ? ' ' : '>'));
		} else {
			u32 insn32;

			if (__get_inst32(&insn32, (u32 *)pc + i, user))
				goto bad_address;

			pr_cont("%c%08x%c", (i ? ' ' : '<'), insn32, (i ? ' ' : '>'));
		}
	}
	pr_cont("\n");
	return;

bad_address:
	pr_cont(" (Bad address in epc)\n\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	if (MIPS_ISA_REV < 6) {
		printk("Hi    : %0*lx\n", field, regs->hi);
		printk("Lo    : %0*lx\n", field, regs->lo);
	}

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->cp0_epc, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	make_task_dead(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
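
/*
 * These masks decompose a 32-bit MIPS instruction word: bits 31..26 hold
 * the major opcode, 25..21 the base/rs register, 20..16 rt, 15..11 rd,
 * 5..0 the function field, and 15..0 a signed 16-bit offset.  E.g.
 * "ll $8, 4($4)" encodes as LL | (4 << 21) | (8 << 16) | 4.
 */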

/* microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;
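
/*
 * Emulation protocol: a simulated LL records the owning task in ll_task
 * and sets ll_bit; every context switch clears ll_bit (see r*_switch.S
 * above), so a simulated SC stores and returns 1 in rt only if no switch
 * has happened since the matching LL and the same task is still running.
 * Otherwise it returns 0, mirroring the architectural LL/SC failure
 * semantics; on the uniprocessor systems this emulation serves, that is
 * sufficient for atomicity.
 */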

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * Analyse the ll instruction that just caused a ri exception
	 * and compute the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;
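	/*
	 * e.g. an immediate of 0x8000 becomes -32768: the shift pair
	 * relies on bit 15 reaching the sign bit, i.e. on the 32-bit
	 * longs of the CPUs this emulation is run on.
	 */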

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * Analyse the sc instruction that just caused a ri exception
	 * and compute the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
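
/*
 * A common trigger for the RDHWR paths below is the TLS sequence the
 * toolchain emits, "rdhwr $3, $29" (HWR 29 being UserLocal): on cores
 * lacking RDHWR or the UserLocal register it raises RI and is handled
 * via MIPS_HWR_ULR above.
 */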

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

/*
 * Loongson-3 CSR instructions emulation
 */

#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION

#define LWC2             0xc8000000
#define RS               BASE
#define CSR_OPCODE2      0x00000118
#define CSR_OPCODE2_MASK 0x000007ff
#define CSR_FUNC_MASK    RT
#define CSR_FUNC_CPUCFG  0x8

static int simulate_loongson3_cpucfg(struct pt_regs *regs,
				     unsigned int opcode)
{
	int op = opcode & OPCODE;
	int op2 = opcode & CSR_OPCODE2_MASK;
	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;

	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
		int rd = (opcode & RD) >> 11;
		int rs = (opcode & RS) >> 21;
		__u64 sel = regs->regs[rs];

		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

		/* Do not emulate on unsupported core models. */
		preempt_disable();
		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
			preempt_enable();
			return -1;
		}
		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);
		preempt_enable();
		return 0;
	}

	/* Not ours.  */
	return -1;
}
#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
	exception_exit(prev_state);
}

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
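/*
 * The tests below run in decreasing order of severity, so if several
 * Cause bits are set at once (e.g. Overflow together with Inexact) the
 * more serious condition wins and FPE_FLTOVF rather than FPE_FLTRES is
 * reported.
 */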
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcr31 & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    (current->thread.emulated_fp++ > mt_fpemul_threshold)) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_mask;
			cpumask_and(&tmask, &current->cpus_mask,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	return -1;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL);
		} else {
			force_sig(SIGTRAP);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	bool user = user_mode(regs);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_inst16(&instr[0], (u16 *)epc, user))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of at bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
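	/*
	 * e.g. a buggy "break 7" arrives here as bcode == 7 << 10 and
	 * the swap above recovers 7; well-formed codes below 1 << 10
	 * pass through untouched.
	 */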

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	bool user = user_mode(regs);
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
		    __get_inst16(&instr[1], (u16 *)(epc + 2), user))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);

#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
		if (status < 0)
			status = simulate_loongson3_cpucfg(regs, opcode);
#endif
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status);
	}

out:
	exception_exit(prev_state);
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
		      "instruction", regs);
	force_sig(SIGILL);

	return NOTIFY_OK;
}
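
/*
 * Platforms with a real coprocessor 2 (Cavium Octeon's crypto unit, for
 * instance) hook this chain early in boot.  A minimal sketch, assuming a
 * platform handler named my_cu2_call:
 *
 *	static struct notifier_block my_cu2_notifier = {
 *		.notifier_call = my_cu2_call,
 *	};
 *
 *	register_cu2_notifier(&my_cu2_notifier);
 *
 * Handlers receive CU2_EXCEPTION as the action code and the faulting
 * struct pt_regs as the data pointer (see do_cpu() below).
 */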

#ifdef CONFIG_MIPS_FP_SUPPORT

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;

	/* Initialize context if it hasn't been used already */
	first_fp = init_fp_ctx(current);

	if (first_fp) {
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			/*
			 * With MSA enabled, userspace can see the MSACSR
			 * and MSA registers, but their values may still
			 * belong to the previous task; restore them from
			 * the saved fp/msa context.
			 */
			write_msa_csr(current->thread.fpu.msacsr);
			/*
			 * own_fpu_inatomic(1) only restores the low 64
			 * bits of each vector register; fix up the high
			 * 64 bits.
			 */
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int enable_restore_fp_context(int msa)
{
	return SIGILL;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status);
		}

		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up, the space has been reused for COP1X instructions, which
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL);
			break;
		}
		fallthrough;
	case 1: {
		void __user *fault_addr;
		unsigned long fcr31;
		int err, sig;

		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;
	}
#else /* CONFIG_MIPS_FP_SUPPORT */
	case 1:
	case 3:
		force_sig(SIGILL);
		break;
#endif /* CONFIG_MIPS_FP_SUPPORT */

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	show_code((void *)regs->cp0_epc, user_mode(regs));

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
		       subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
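
/*
 * ErrCtl.PE (bit 31) enables primary-cache parity checking; ErrCtl.L2P
 * (bit 23) selects/enables L2 parity on the cores handled below.
 */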
1691 | |
1692 | if (mips_cm_revision() >= CM_REV_CM3) { |
1693 | ulong gcr_ectl, cp0_ectl; |
1694 | |
1695 | /* |
1696 | * With CM3 systems we need to ensure that the L1 & L2 |
1697 | * parity enables are set to the same value, since this |
1698 | * is presumed by the hardware engineers. |
1699 | * |
1700 | * If the user disabled either of L1 or L2 ECC checking, |
1701 | * disable both. |
1702 | */ |
1703 | l1parity &= l2parity; |
1704 | l2parity &= l1parity; |
1705 | |
1706 | /* Probe L1 ECC support */ |
1707 | cp0_ectl = read_c0_ecc(); |
1708 | write_c0_ecc(cp0_ectl | ERRCTL_PE); |
1709 | back_to_back_c0_hazard(); |
1710 | cp0_ectl = read_c0_ecc(); |
1711 | |
1712 | /* Probe L2 ECC support */ |
1713 | gcr_ectl = read_gcr_err_control(); |
1714 | |
1715 | if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) || |
1716 | !(cp0_ectl & ERRCTL_PE)) { |
1717 | /* |
1718 | * One of L1 or L2 ECC checking isn't supported, |
1719 | * so we cannot enable either. |
1720 | */ |
1721 | l1parity = l2parity = 0; |
1722 | } |
1723 | |
1724 | /* Configure L1 ECC checking */ |
1725 | if (l1parity) |
1726 | cp0_ectl |= ERRCTL_PE; |
1727 | else |
1728 | cp0_ectl &= ~ERRCTL_PE; |
1729 | write_c0_ecc(cp0_ectl); |
1730 | back_to_back_c0_hazard(); |
1731 | WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity); |
1732 | |
1733 | /* Configure L2 ECC checking */ |
1734 | if (l2parity) |
1735 | gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN; |
1736 | else |
1737 | gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN; |
1738 | write_gcr_err_control(gcr_ectl); |
1739 | gcr_ectl = read_gcr_err_control(); |
1740 | gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; |
1741 | WARN_ON(!!gcr_ectl != l2parity); |
1742 | |
1743 | pr_info("Cache parity protection %sabled\n" , |
1744 | l1parity ? "en" : "dis" ); |
1745 | return; |
1746 | } |
1747 | |
1748 | switch (current_cpu_type()) { |
1749 | case CPU_24K: |
1750 | case CPU_34K: |
1751 | case CPU_74K: |
1752 | case CPU_1004K: |
1753 | case CPU_1074K: |
1754 | case CPU_INTERAPTIV: |
1755 | case CPU_PROAPTIV: |
1756 | case CPU_P5600: |
1757 | case CPU_QEMU_GENERIC: |
1758 | case CPU_P6600: |
1759 | { |
1760 | unsigned long errctl; |
1761 | unsigned int l1parity_present, l2parity_present; |
1762 | |
1763 | errctl = read_c0_ecc(); |
1764 | errctl &= ~(ERRCTL_PE|ERRCTL_L2P); |
1765 | |
1766 | /* probe L1 parity support */ |
1767 | write_c0_ecc(errctl | ERRCTL_PE); |
1768 | back_to_back_c0_hazard(); |
1769 | l1parity_present = (read_c0_ecc() & ERRCTL_PE); |
1770 | |
1771 | /* probe L2 parity support */ |
1772 | write_c0_ecc(errctl|ERRCTL_L2P); |
1773 | back_to_back_c0_hazard(); |
1774 | l2parity_present = (read_c0_ecc() & ERRCTL_L2P); |
1775 | |
1776 | if (l1parity_present && l2parity_present) { |
1777 | if (l1parity) |
1778 | errctl |= ERRCTL_PE; |
1779 | if (l1parity ^ l2parity) |
1780 | errctl |= ERRCTL_L2P; |
1781 | } else if (l1parity_present) { |
1782 | if (l1parity) |
1783 | errctl |= ERRCTL_PE; |
1784 | } else if (l2parity_present) { |
1785 | if (l2parity) |
1786 | errctl |= ERRCTL_L2P; |
1787 | } else { |
1788 | /* No parity available */ |
1789 | } |
1790 | |
1791 | printk(KERN_INFO "Writing ErrCtl register=%08lx\n" , errctl); |
1792 | |
1793 | write_c0_ecc(errctl); |
1794 | back_to_back_c0_hazard(); |
1795 | errctl = read_c0_ecc(); |
1796 | printk(KERN_INFO "Readback ErrCtl register=%08lx\n" , errctl); |
1797 | |
1798 | if (l1parity_present) |
1799 | printk(KERN_INFO "Cache parity protection %sabled\n" , |
1800 | (errctl & ERRCTL_PE) ? "en" : "dis" ); |
1801 | |
1802 | if (l2parity_present) { |
1803 | if (l1parity_present && l1parity) |
1804 | errctl ^= ERRCTL_L2P; |
1805 | printk(KERN_INFO "L2 cache parity protection %sabled\n" , |
1806 | (errctl & ERRCTL_L2P) ? "en" : "dis" ); |
1807 | } |
1808 | } |
1809 | break; |
1810 | |
1811 | case CPU_5KC: |
1812 | case CPU_5KE: |
1813 | case CPU_LOONGSON32: |
1814 | write_c0_ecc(0x80000000); |
1815 | back_to_back_c0_hazard(); |
1816 | /* Set the PE bit (bit 31) in the c0_errctl register. */ |
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
1819 | break; |
1820 | case CPU_20KC: |
1821 | case CPU_25KF: |
1822 | /* Clear the DE bit (bit 16) in the c0_status register. */ |
printk(KERN_INFO "Enable cache parity protection for MIPS 20KC/25KF CPUs.\n");
1825 | clear_c0_status(ST0_DE); |
1826 | break; |
1827 | default: |
1828 | break; |
1829 | } |
1830 | } |
1831 | |
1832 | asmlinkage void cache_parity_error(void) |
1833 | { |
1834 | const int field = 2 * sizeof(unsigned long); |
1835 | unsigned int reg_val; |
1836 | |
1837 | /* For the moment, report the problem and hang. */ |
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);

printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
1846 | if ((cpu_has_mips_r2_r6) && |
1847 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { |
pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<27) ? "ES " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
1857 | } else { |
pr_err("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
1866 | } |
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1868 | |
1869 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) |
1870 | if (reg_val & (1<<22)) |
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1872 | |
1873 | if (reg_val & (1<<23)) |
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1875 | #endif |
1876 | |
panic("Can't handle the cache error!");
1878 | } |
1879 | |
1880 | asmlinkage void do_ftlb(void) |
1881 | { |
1882 | const int field = 2 * sizeof(unsigned long); |
1883 | unsigned int reg_val; |
1884 | |
1885 | /* For the moment, report the problem and hang. */ |
1886 | if ((cpu_has_mips_r2_r6) && |
1887 | (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || |
1888 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { |
pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
read_c0_ecc());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
pr_err("c0_cacheerr == %08x\n", reg_val);
1894 | |
1895 | if ((reg_val & 0xc0000000) == 0xc0000000) { |
pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1897 | } else { |
pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
1901 | } |
1902 | } else { |
pr_err("FTLB error exception\n");
1904 | } |
1905 | /* Just print the cacheerr bits for now */ |
1906 | cache_parity_error(); |
1907 | } |
1908 | |
1909 | asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1) |
1910 | { |
1911 | u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >> |
1912 | LOONGSON_DIAG1_EXCCODE_SHIFT; |
1913 | enum ctx_state prev_state; |
1914 | |
1915 | prev_state = exception_enter(); |
1916 | |
1917 | switch (exccode) { |
1918 | case 0x08: |
1919 | /* Undocumented exception, will trigger on certain |
1920 | * also-undocumented instructions accessible from userspace. |
1921 | * Processor state is not otherwise corrupted, but currently |
1922 | * we don't know how to proceed. Maybe there is some |
1923 | * undocumented control flag to enable the instructions? |
1924 | */ |
1925 | force_sig(SIGILL); |
1926 | break; |
1927 | |
1928 | default: |
1929 | /* None of the other exceptions, documented or not, have |
1930 | * further details given; none are encountered in the wild |
1931 | * either. Panic in case some of them turn out to be fatal. |
1932 | */ |
1933 | show_regs(regs); |
panic("Unhandled Loongson exception - GSCause = %08x", diag1);
1935 | } |
1936 | |
exception_exit(prev_state);
1938 | } |
1939 | |
1940 | /* |
1941 | * SDBBP EJTAG debug exception handler. |
1942 | * We skip the instruction and return to the next instruction. |
1943 | */ |
1944 | void ejtag_exception_handler(struct pt_regs *regs) |
1945 | { |
1946 | const int field = 2 * sizeof(unsigned long); |
1947 | unsigned long depc, old_epc, old_ra; |
1948 | unsigned int debug; |
1949 | |
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1951 | depc = read_c0_depc(); |
1952 | debug = read_c0_debug(); |
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1954 | if (debug & 0x80000000) { |
1955 | /* |
1956 | * In branch delay slot. |
1957 | * We cheat a little bit here and use EPC to calculate the |
1958 | * debug return address (DEPC). EPC is restored after the |
1959 | * calculation. |
1960 | */ |
1961 | old_epc = regs->cp0_epc; |
1962 | old_ra = regs->regs[31]; |
1963 | regs->cp0_epc = depc; |
1964 | compute_return_epc(regs); |
1965 | depc = regs->cp0_epc; |
1966 | regs->cp0_epc = old_epc; |
1967 | regs->regs[31] = old_ra; |
1968 | } else |
1969 | depc += 4; |
1970 | write_c0_depc(depc); |
1971 | |
1972 | #if 0 |
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1974 | write_c0_debug(debug | 0x100); |
1975 | #endif |
1976 | } |
1977 | |
1978 | /* |
1979 | * NMI exception handler. |
1980 | * No lock; only written during early bootup by CPU 0. |
1981 | */ |
1982 | static RAW_NOTIFIER_HEAD(nmi_chain); |
1983 | |
1984 | int register_nmi_notifier(struct notifier_block *nb) |
1985 | { |
return raw_notifier_chain_register(&nmi_chain, nb);
1987 | } |
1988 | |
1989 | void __noreturn nmi_exception_handler(struct pt_regs *regs) |
1990 | { |
1991 | char str[100]; |
1992 | |
1993 | nmi_enter(); |
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1997 | smp_processor_id(), regs->cp0_epc); |
1998 | regs->cp0_epc = read_c0_errorepc(); |
1999 | die(str, regs); |
2000 | nmi_exit(); |
2001 | } |
2002 | |
2003 | unsigned long ebase; |
2004 | EXPORT_SYMBOL_GPL(ebase); |
2005 | unsigned long exception_handlers[32]; |
2006 | unsigned long vi_handlers[64]; |
2007 | |
2008 | void reserve_exception_space(phys_addr_t addr, unsigned long size) |
2009 | { |
memblock_reserve(addr, size);
2011 | } |
2012 | |
2013 | void __init *set_except_vector(int n, void *addr) |
2014 | { |
2015 | unsigned long handler = (unsigned long) addr; |
2016 | unsigned long old_handler; |
2017 | |
2018 | #ifdef CONFIG_CPU_MICROMIPS |
2019 | /* |
2020 | * Only the TLB handlers are cache aligned with an even |
2021 | * address. All other handlers are on an odd address and |
2022 | * require no modification. Otherwise, MIPS32 mode will |
2023 | * be entered when handling any TLB exceptions. That |
2024 | * would be bad...since we must stay in microMIPS mode. |
2025 | */ |
2026 | if (!(handler & 0x1)) |
2027 | handler |= 1; |
2028 | #endif |
2029 | old_handler = xchg(&exception_handlers[n], handler); |
2030 | |
2031 | if (n == 0 && cpu_has_divec) { |
2032 | #ifdef CONFIG_CPU_MICROMIPS |
2033 | unsigned long jump_mask = ~((1 << 27) - 1); |
2034 | #else |
2035 | unsigned long jump_mask = ~((1 << 28) - 1); |
2036 | #endif |
2037 | u32 *buf = (u32 *)(ebase + 0x200); |
2038 | unsigned int k0 = 26; |
2039 | if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { |
2040 | uasm_i_j(&buf, handler & ~jump_mask); |
2041 | uasm_i_nop(&buf); |
2042 | } else { |
2043 | UASM_i_LA(&buf, k0, handler); |
2044 | uasm_i_jr(&buf, k0); |
2045 | uasm_i_nop(&buf); |
2046 | } |
2047 | local_flush_icache_range(ebase + 0x200, (unsigned long)buf); |
2048 | } |
2049 | return (void *)old_handler; |
2050 | } |
2051 | |
2052 | static void do_default_vi(void) |
2053 | { |
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
2056 | } |
2057 | |
2058 | static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) |
2059 | { |
2060 | unsigned long handler; |
2061 | unsigned long old_handler = vi_handlers[n]; |
int srssets = current_cpu_data.srsets;
2063 | u16 *h; |
2064 | unsigned char *b; |
2065 | |
2066 | BUG_ON(!cpu_has_veic && !cpu_has_vint); |
2067 | |
2068 | if (addr == NULL) { |
2069 | handler = (unsigned long) do_default_vi; |
2070 | srs = 0; |
2071 | } else |
2072 | handler = (unsigned long) addr; |
2073 | vi_handlers[n] = handler; |
2074 | |
2075 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); |
2076 | |
2077 | if (srs >= srssets) |
panic("Shadow register set %d not supported", srs);
2079 | |
2080 | if (cpu_has_veic) { |
2081 | if (board_bind_eic_interrupt) |
2082 | board_bind_eic_interrupt(n, srs); |
2083 | } else if (cpu_has_vint) { |
2084 | /* SRSMap is only defined if shadow sets are implemented */ |
2085 | if (srssets > 1) |
2086 | change_c0_srsmap(0xf << n*4, srs << n*4); |
2087 | } |
2088 | |
2089 | if (srs == 0) { |
2090 | /* |
2091 | * If no shadow set is selected then use the default handler |
2092 | * that does normal register saving and standard interrupt exit |
2093 | */ |
2094 | extern const u8 except_vec_vi[], except_vec_vi_lui[]; |
2095 | extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; |
2096 | extern const u8 rollback_except_vec_vi[]; |
2097 | const u8 *vec_start = using_rollback_handler() ? |
2098 | rollback_except_vec_vi : except_vec_vi; |
2099 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) |
2100 | const int lui_offset = except_vec_vi_lui - vec_start + 2; |
2101 | const int ori_offset = except_vec_vi_ori - vec_start + 2; |
2102 | #else |
2103 | const int lui_offset = except_vec_vi_lui - vec_start; |
2104 | const int ori_offset = except_vec_vi_ori - vec_start; |
2105 | #endif |
2106 | const int handler_len = except_vec_vi_end - vec_start; |
2107 | |
2108 | if (handler_len > VECTORSPACING) { |
2109 | /* |
* Sigh... panicking won't help as the console
2111 | * is probably not configured :( |
2112 | */ |
panic("VECTORSPACING too small");
2114 | } |
2115 | |
2116 | set_handler(((unsigned long)b - ebase), vec_start, |
2117 | #ifdef CONFIG_CPU_MICROMIPS |
2118 | (handler_len - 1)); |
2119 | #else |
2120 | handler_len); |
2121 | #endif |
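/*
* Patch the handler address into the immediates of the lui/ori pair
* in the stub just copied. The +2 offsets above select the immediate
* halfword inside the 32-bit instruction, which comes second in
* big-endian and microMIPS encodings.
*/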
2122 | h = (u16 *)(b + lui_offset); |
2123 | *h = (handler >> 16) & 0xffff; |
2124 | h = (u16 *)(b + ori_offset); |
2125 | *h = (handler & 0xffff); |
2126 | local_flush_icache_range((unsigned long)b, |
2127 | (unsigned long)(b+handler_len)); |
2128 | } |
2129 | else { |
2130 | /* |
2131 | * In other cases jump directly to the interrupt handler. It |
2132 | * is the handler's responsibility to save registers if required |
2133 | * (eg hi/lo) and return from the exception using "eret". |
2134 | */ |
2135 | u32 insn; |
2136 | |
2137 | h = (u16 *)b; |
2138 | /* j handler */ |
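/*
* MIPS32 "j" is opcode 0x08000000 with a 26-bit word-index target;
* the microMIPS 32-bit jump (0xd4000000) takes a halfword index,
* hence the different masks and shifts.
*/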
2139 | #ifdef CONFIG_CPU_MICROMIPS |
2140 | insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); |
2141 | #else |
2142 | insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); |
2143 | #endif |
2144 | h[0] = (insn >> 16) & 0xffff; |
2145 | h[1] = insn & 0xffff; |
2146 | h[2] = 0; |
2147 | h[3] = 0; |
2148 | local_flush_icache_range((unsigned long)b, |
2149 | (unsigned long)(b+8)); |
2150 | } |
2151 | |
2152 | return (void *)old_handler; |
2153 | } |
2154 | |
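/*
* Convenience wrapper: install a vectored interrupt handler that runs
* on the normal register set (shadow set 0), i.e. through the default
* save/restore stub set up above.
*/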
2155 | void *set_vi_handler(int n, vi_handler_t addr) |
2156 | { |
2157 | return set_vi_srs_handler(n, addr, 0); |
2158 | } |
2159 | |
2160 | extern void tlb_init(void); |
2161 | |
2162 | /* |
2163 | * Timer interrupt |
2164 | */ |
2165 | int cp0_compare_irq; |
2166 | EXPORT_SYMBOL_GPL(cp0_compare_irq); |
2167 | int cp0_compare_irq_shift; |
2168 | |
2169 | /* |
2170 | * Performance counter IRQ or -1 if shared with timer |
2171 | */ |
2172 | int cp0_perfcount_irq; |
2173 | EXPORT_SYMBOL_GPL(cp0_perfcount_irq); |
2174 | |
2175 | /* |
2176 | * Fast debug channel IRQ or -1 if not present |
2177 | */ |
2178 | int cp0_fdc_irq; |
2179 | EXPORT_SYMBOL_GPL(cp0_fdc_irq); |
2180 | |
2181 | static int noulri; |
2182 | |
2183 | static int __init ulri_disable(char *s) |
2184 | { |
pr_info("Disabling ulri\n");
2186 | noulri = 1; |
2187 | |
2188 | return 1; |
2189 | } |
__setup("noulri", ulri_disable);
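
/*
* Booting with "noulri" on the command line keeps MIPS_HWRENA_ULR
* clear, so userspace reads of UserLocal trap and take the emulation
* path even on CPUs that implement the register.
*/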
2191 | |
2192 | /* configure STATUS register */ |
2193 | static void configure_status(void) |
2194 | { |
2195 | /* |
2196 | * Disable coprocessors and select 32-bit or 64-bit addressing |
2197 | * and the 16/32 or 32/32 FPR register model. Reset the BEV |
2198 | * flag that some firmware may have left set and the TS bit (for |
2199 | * IP27). Set XX for ISA IV code to work. |
2200 | */ |
2201 | unsigned int status_set = ST0_KERNEL_CUMASK; |
2202 | #ifdef CONFIG_64BIT |
2203 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
2204 | #endif |
2205 | if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) |
2206 | status_set |= ST0_XX; |
2207 | if (cpu_has_dsp) |
2208 | status_set |= ST0_MX; |
2209 | |
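/*
* change_c0_status(mask, bits) is a read-modify-write helper, roughly
* Status = (Status & ~mask) | (bits & mask); every mask bit absent
* from status_set is therefore cleared here.
*/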
2210 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
2211 | status_set); |
2212 | back_to_back_c0_hazard(); |
2213 | } |
2214 | |
2215 | unsigned int hwrena; |
2216 | EXPORT_SYMBOL_GPL(hwrena); |
2217 | |
2218 | /* configure HWRENA register */ |
2219 | static void configure_hwrena(void) |
2220 | { |
2221 | hwrena = cpu_hwrena_impl_bits; |
2222 | |
2223 | if (cpu_has_mips_r2_r6) |
2224 | hwrena |= MIPS_HWRENA_CPUNUM | |
2225 | MIPS_HWRENA_SYNCISTEP | |
2226 | MIPS_HWRENA_CC | |
2227 | MIPS_HWRENA_CCRES; |
2228 | |
2229 | if (!noulri && cpu_has_userlocal) |
2230 | hwrena |= MIPS_HWRENA_ULR; |
2231 | |
2232 | if (hwrena) |
2233 | write_c0_hwrena(hwrena); |
2234 | } |
2235 | |
2236 | static void configure_exception_vector(void) |
2237 | { |
2238 | if (cpu_has_mips_r2_r6) { |
2239 | unsigned long sr = set_c0_status(ST0_BEV); |
2240 | /* If available, use WG to set top bits of EBASE */ |
2241 | if (cpu_has_ebase_wg) { |
2242 | #ifdef CONFIG_64BIT |
2243 | write_c0_ebase_64(ebase | MIPS_EBASE_WG); |
2244 | #else |
2245 | write_c0_ebase(ebase | MIPS_EBASE_WG); |
2246 | #endif |
2247 | } |
2248 | write_c0_ebase(ebase); |
2249 | write_c0_status(sr); |
2250 | } |
2251 | if (cpu_has_veic || cpu_has_vint) { |
2252 | /* Setting vector spacing enables EI/VI mode */ |
2253 | change_c0_intctl(0x3e0, VECTORSPACING); |
2254 | } |
2255 | if (cpu_has_divec) { |
2256 | if (cpu_has_mipsmt) { |
2257 | unsigned int vpflags = dvpe(); |
2258 | set_c0_cause(CAUSEF_IV); |
2259 | evpe(vpflags); |
2260 | } else |
2261 | set_c0_cause(CAUSEF_IV); |
2262 | } |
2263 | } |
2264 | |
2265 | void per_cpu_trap_init(bool is_boot_cpu) |
2266 | { |
2267 | unsigned int cpu = smp_processor_id(); |
2268 | |
2269 | configure_status(); |
2270 | configure_hwrena(); |
2271 | |
2272 | configure_exception_vector(); |
2273 | |
2274 | /* |
2275 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: |
2276 | * |
2277 | * o read IntCtl.IPTI to determine the timer interrupt |
2278 | * o read IntCtl.IPPCI to determine the performance counter interrupt |
2279 | * o read IntCtl.IPFDC to determine the fast debug channel interrupt |
2280 | */ |
2281 | if (cpu_has_mips_r2_r6) { |
2282 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; |
2283 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; |
2284 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; |
2285 | cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; |
2286 | if (!cp0_fdc_irq) |
2287 | cp0_fdc_irq = -1; |
2288 | |
2289 | } else { |
2290 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; |
2291 | cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; |
2292 | cp0_perfcount_irq = -1; |
2293 | cp0_fdc_irq = -1; |
2294 | } |
2295 | |
2296 | if (cpu_has_mmid) |
2297 | cpu_data[cpu].asid_cache = 0; |
2298 | else if (!cpu_data[cpu].asid_cache) |
2299 | cpu_data[cpu].asid_cache = asid_first_version(cpu); |
2300 | |
mmgrab(&init_mm);
2302 | current->active_mm = &init_mm; |
2303 | BUG_ON(current->mm); |
enter_lazy_tlb(&init_mm, current);
2305 | |
2306 | /* Boot CPU's cache setup in setup_arch(). */ |
2307 | if (!is_boot_cpu) |
2308 | cpu_cache_init(); |
2309 | tlb_init(); |
2310 | TLBMISS_HANDLER_SETUP(); |
2311 | } |
2312 | |
2313 | /* Install CPU exception handler */ |
2314 | void set_handler(unsigned long offset, const void *addr, unsigned long size) |
2315 | { |
2316 | #ifdef CONFIG_CPU_MICROMIPS |
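/*
* microMIPS handler symbols carry the ISA mode bit (bit 0); back up
* one byte so the copy starts at the real first instruction.
*/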
2317 | memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); |
2318 | #else |
2319 | memcpy((void *)(ebase + offset), addr, size); |
2320 | #endif |
2321 | local_flush_icache_range(ebase + offset, ebase + offset + size); |
2322 | } |
2323 | |
static const char panic_null_cerr[] =
"Trying to set NULL cache error exception handler\n";
2326 | |
2327 | /* |
2328 | * Install uncached CPU exception handler. |
2329 | * This is suitable only for the cache error exception which is the only |
2330 | * exception handler that is being run uncached. |
2331 | */ |
2332 | void set_uncached_handler(unsigned long offset, void *addr, |
2333 | unsigned long size) |
2334 | { |
2335 | unsigned long uncached_ebase = CKSEG1ADDR(ebase); |
2336 | |
2337 | if (!addr) |
panic(panic_null_cerr);
2339 | |
2340 | memcpy((void *)(uncached_ebase + offset), addr, size); |
2341 | } |
2342 | |
2343 | static int __initdata rdhwr_noopt; |
2344 | static int __init set_rdhwr_noopt(char *str) |
2345 | { |
2346 | rdhwr_noopt = 1; |
2347 | return 1; |
2348 | } |
2349 | |
__setup("rdhwr_noopt", set_rdhwr_noopt);
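
/*
* "rdhwr_noopt" on the command line makes trap_init() below install
* the plain RI handler instead of the optimized handle_ri_rdhwr*
* fast paths for RDHWR emulation.
*/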
2351 | |
2352 | void __init trap_init(void) |
2353 | { |
2354 | extern char except_vec3_generic; |
2355 | extern char except_vec4; |
2356 | extern char except_vec3_r4000; |
2357 | unsigned long i, vec_size; |
2358 | phys_addr_t ebase_pa; |
2359 | |
2360 | check_wait(); |
2361 | |
2362 | if (!cpu_has_mips_r2_r6) { |
2363 | ebase = CAC_BASE; |
2364 | vec_size = 0x400; |
2365 | } else { |
2366 | if (cpu_has_veic || cpu_has_vint) |
2367 | vec_size = 0x200 + VECTORSPACING*64; |
2368 | else |
2369 | vec_size = PAGE_SIZE; |
2370 | |
ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
if (!ebase_pa)
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
__func__, vec_size, 1 << fls(vec_size));
2375 | |
2376 | /* |
2377 | * Try to ensure ebase resides in KSeg0 if possible. |
2378 | * |
2379 | * It shouldn't generally be in XKPhys on MIPS64 to avoid |
2380 | * hitting a poorly defined exception base for Cache Errors. |
2381 | * The allocation is likely to be in the low 512MB of physical, |
2382 | * in which case we should be able to convert to KSeg0. |
2383 | * |
2384 | * EVA is special though as it allows segments to be rearranged |
2385 | * and to become uncached during cache error handling. |
2386 | */ |
2387 | if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) |
2388 | ebase = CKSEG0ADDR(ebase_pa); |
2389 | else |
ebase = (unsigned long)phys_to_virt(ebase_pa);
2391 | } |
2392 | |
2393 | if (cpu_has_mmips) { |
2394 | unsigned int config3 = read_c0_config3(); |
2395 | |
2396 | if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) |
2397 | write_c0_config3(config3 | MIPS_CONF3_ISA_OE); |
2398 | else |
2399 | write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); |
2400 | } |
2401 | |
2402 | if (board_ebase_setup) |
2403 | board_ebase_setup(); |
per_cpu_trap_init(true);
memblock_set_bottom_up(false);
2406 | |
2407 | /* |
2408 | * Copy the generic exception handlers to their final destination. |
2409 | * This will be overridden later as suitable for a particular |
2410 | * configuration. |
2411 | */ |
2412 | set_handler(0x180, &except_vec3_generic, 0x80); |
2413 | |
2414 | /* |
2415 | * Setup default vectors |
2416 | */ |
2417 | for (i = 0; i <= 31; i++) |
set_except_vector(i, handle_reserved);
2419 | |
2420 | /* |
* Copy the EJTAG debug exception vector handler code to its final
2422 | * destination. |
2423 | */ |
2424 | if (cpu_has_ejtag && board_ejtag_handler_setup) |
2425 | board_ejtag_handler_setup(); |
2426 | |
2427 | /* |
2428 | * Only some CPUs have the watch exceptions. |
2429 | */ |
2430 | if (cpu_has_watch) |
2431 | set_except_vector(EXCCODE_WATCH, handle_watch); |
2432 | |
2433 | /* |
2434 | * Initialise interrupt handlers |
2435 | */ |
2436 | if (cpu_has_veic || cpu_has_vint) { |
2437 | int nvec = cpu_has_veic ? 64 : 8; |
2438 | for (i = 0; i < nvec; i++) |
2439 | set_vi_handler(i, NULL); |
2440 | } |
2441 | else if (cpu_has_divec) |
2442 | set_handler(0x200, &except_vec4, 0x8); |
2443 | |
2444 | /* |
* Some CPUs can enable/disable cache parity detection, but they do
* it in different ways.
2447 | */ |
2448 | parity_protection_init(); |
2449 | |
2450 | /* |
2451 | * The Data Bus Errors / Instruction Bus Errors are signaled |
2452 | * by external hardware. Therefore these two exceptions |
2453 | * may have board specific handlers. |
2454 | */ |
2455 | if (board_be_init) |
2456 | board_be_init(); |
2457 | |
2458 | set_except_vector(EXCCODE_INT, using_rollback_handler() ? |
2459 | rollback_handle_int : handle_int); |
2460 | set_except_vector(EXCCODE_MOD, handle_tlbm); |
2461 | set_except_vector(EXCCODE_TLBL, handle_tlbl); |
2462 | set_except_vector(EXCCODE_TLBS, handle_tlbs); |
2463 | |
2464 | set_except_vector(EXCCODE_ADEL, handle_adel); |
2465 | set_except_vector(EXCCODE_ADES, handle_ades); |
2466 | |
2467 | set_except_vector(EXCCODE_IBE, handle_ibe); |
2468 | set_except_vector(EXCCODE_DBE, handle_dbe); |
2469 | |
2470 | set_except_vector(EXCCODE_SYS, handle_sys); |
2471 | set_except_vector(EXCCODE_BP, handle_bp); |
2472 | |
2473 | if (rdhwr_noopt) |
2474 | set_except_vector(EXCCODE_RI, handle_ri); |
2475 | else { |
2476 | if (cpu_has_vtag_icache) |
2477 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); |
2478 | else if (current_cpu_type() == CPU_LOONGSON64) |
2479 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); |
2480 | else |
2481 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr); |
2482 | } |
2483 | |
2484 | set_except_vector(EXCCODE_CPU, handle_cpu); |
2485 | set_except_vector(EXCCODE_OV, handle_ov); |
2486 | set_except_vector(EXCCODE_TR, handle_tr); |
2487 | set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe); |
2488 | |
2489 | if (board_nmi_handler_setup) |
2490 | board_nmi_handler_setup(); |
2491 | |
2492 | if (cpu_has_fpu && !cpu_has_nofpuex) |
2493 | set_except_vector(EXCCODE_FPE, handle_fpe); |
2494 | |
2495 | if (cpu_has_ftlbparex) |
2496 | set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb); |
2497 | |
2498 | if (cpu_has_gsexcex) |
2499 | set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc); |
2500 | |
2501 | if (cpu_has_rixiex) { |
2502 | set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0); |
2503 | set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0); |
2504 | } |
2505 | |
2506 | set_except_vector(EXCCODE_MSADIS, handle_msa); |
2507 | set_except_vector(EXCCODE_MDMX, handle_mdmx); |
2508 | |
2509 | if (cpu_has_mcheck) |
2510 | set_except_vector(EXCCODE_MCHECK, handle_mcheck); |
2511 | |
2512 | if (cpu_has_mipsmt) |
2513 | set_except_vector(EXCCODE_THREAD, handle_mt); |
2514 | |
2515 | set_except_vector(EXCCODE_DSPDIS, handle_dsp); |
2516 | |
2517 | if (board_cache_error_setup) |
2518 | board_cache_error_setup(); |
2519 | |
2520 | if (cpu_has_vce) |
2521 | /* Special exception: R4[04]00 uses also the divec space. */ |
2522 | set_handler(0x180, &except_vec3_r4000, 0x100); |
2523 | else if (cpu_has_4kex) |
2524 | set_handler(0x180, &except_vec3_generic, 0x80); |
2525 | else |
2526 | set_handler(0x080, &except_vec3_generic, 0x80); |
2527 | |
2528 | local_flush_icache_range(ebase, ebase + vec_size); |
2529 | |
sort_extable(__start___dbe_table, __stop___dbe_table);
2531 | |
2532 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ |
2533 | } |
2534 | |
2535 | static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, |
2536 | void *v) |
2537 | { |
2538 | switch (cmd) { |
2539 | case CPU_PM_ENTER_FAILED: |
2540 | case CPU_PM_EXIT: |
2541 | configure_status(); |
2542 | configure_hwrena(); |
2543 | configure_exception_vector(); |
2544 | |
2545 | /* Restore register with CPU number for TLB handlers */ |
2546 | TLBMISS_HANDLER_RESTORE(); |
2547 | |
2548 | break; |
2549 | } |
2550 | |
2551 | return NOTIFY_OK; |
2552 | } |
2553 | |
2554 | static struct notifier_block trap_pm_notifier_block = { |
2555 | .notifier_call = trap_pm_notifier, |
2556 | }; |
2557 | |
2558 | static int __init trap_pm_init(void) |
2559 | { |
return cpu_pm_register_notifier(&trap_pm_notifier_block);
2561 | } |
2562 | arch_initcall(trap_pm_init); |
2563 | |