1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Based on arch/arm/kernel/traps.c |
4 | * |
5 | * Copyright (C) 1995-2009 Russell King |
6 | * Copyright (C) 2012 ARM Ltd. |
7 | */ |
8 | |
9 | #include <linux/bug.h> |
10 | #include <linux/context_tracking.h> |
11 | #include <linux/signal.h> |
12 | #include <linux/kallsyms.h> |
13 | #include <linux/kprobes.h> |
14 | #include <linux/spinlock.h> |
15 | #include <linux/uaccess.h> |
16 | #include <linux/hardirq.h> |
17 | #include <linux/kdebug.h> |
18 | #include <linux/module.h> |
19 | #include <linux/kexec.h> |
20 | #include <linux/delay.h> |
21 | #include <linux/efi.h> |
22 | #include <linux/init.h> |
23 | #include <linux/sched/signal.h> |
24 | #include <linux/sched/debug.h> |
25 | #include <linux/sched/task_stack.h> |
26 | #include <linux/sizes.h> |
27 | #include <linux/syscalls.h> |
28 | #include <linux/mm_types.h> |
29 | #include <linux/kasan.h> |
30 | #include <linux/ubsan.h> |
31 | #include <linux/cfi.h> |
32 | |
33 | #include <asm/atomic.h> |
34 | #include <asm/bug.h> |
35 | #include <asm/cpufeature.h> |
36 | #include <asm/daifflags.h> |
37 | #include <asm/debug-monitors.h> |
38 | #include <asm/efi.h> |
39 | #include <asm/esr.h> |
40 | #include <asm/exception.h> |
41 | #include <asm/extable.h> |
42 | #include <asm/insn.h> |
43 | #include <asm/kprobes.h> |
44 | #include <asm/patching.h> |
45 | #include <asm/traps.h> |
46 | #include <asm/smp.h> |
47 | #include <asm/stack_pointer.h> |
48 | #include <asm/stacktrace.h> |
49 | #include <asm/system_misc.h> |
50 | #include <asm/sysreg.h> |
51 | |
52 | static bool __kprobes __check_eq(unsigned long pstate) |
53 | { |
54 | return (pstate & PSR_Z_BIT) != 0; |
55 | } |
56 | |
57 | static bool __kprobes __check_ne(unsigned long pstate) |
58 | { |
59 | return (pstate & PSR_Z_BIT) == 0; |
60 | } |
61 | |
62 | static bool __kprobes __check_cs(unsigned long pstate) |
63 | { |
64 | return (pstate & PSR_C_BIT) != 0; |
65 | } |
66 | |
67 | static bool __kprobes __check_cc(unsigned long pstate) |
68 | { |
69 | return (pstate & PSR_C_BIT) == 0; |
70 | } |
71 | |
72 | static bool __kprobes __check_mi(unsigned long pstate) |
73 | { |
74 | return (pstate & PSR_N_BIT) != 0; |
75 | } |
76 | |
77 | static bool __kprobes __check_pl(unsigned long pstate) |
78 | { |
79 | return (pstate & PSR_N_BIT) == 0; |
80 | } |
81 | |
82 | static bool __kprobes __check_vs(unsigned long pstate) |
83 | { |
84 | return (pstate & PSR_V_BIT) != 0; |
85 | } |
86 | |
87 | static bool __kprobes __check_vc(unsigned long pstate) |
88 | { |
89 | return (pstate & PSR_V_BIT) == 0; |
90 | } |
91 | |
92 | static bool __kprobes __check_hi(unsigned long pstate) |
93 | { |
94 | pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ |
95 | return (pstate & PSR_C_BIT) != 0; |
96 | } |
97 | |
98 | static bool __kprobes __check_ls(unsigned long pstate) |
99 | { |
100 | pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ |
101 | return (pstate & PSR_C_BIT) == 0; |
102 | } |
103 | |
104 | static bool __kprobes __check_ge(unsigned long pstate) |
105 | { |
106 | pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ |
107 | return (pstate & PSR_N_BIT) == 0; |
108 | } |
109 | |
110 | static bool __kprobes __check_lt(unsigned long pstate) |
111 | { |
112 | pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ |
113 | return (pstate & PSR_N_BIT) != 0; |
114 | } |
115 | |
116 | static bool __kprobes __check_gt(unsigned long pstate) |
117 | { |
118 | /*PSR_N_BIT ^= PSR_V_BIT */ |
119 | unsigned long temp = pstate ^ (pstate << 3); |
120 | |
121 | temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ |
122 | return (temp & PSR_N_BIT) == 0; |
123 | } |
124 | |
125 | static bool __kprobes __check_le(unsigned long pstate) |
126 | { |
127 | /*PSR_N_BIT ^= PSR_V_BIT */ |
128 | unsigned long temp = pstate ^ (pstate << 3); |
129 | |
130 | temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ |
131 | return (temp & PSR_N_BIT) != 0; |
132 | } |
133 | |
134 | static bool __kprobes __check_al(unsigned long pstate) |
135 | { |
136 | return true; |
137 | } |
138 | |
139 | /* |
140 | * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that |
141 | * it behaves identically to 0b1110 ("al"). |
142 | */ |
143 | pstate_check_t * const aarch32_opcode_cond_checks[16] = { |
144 | __check_eq, __check_ne, __check_cs, __check_cc, |
145 | __check_mi, __check_pl, __check_vs, __check_vc, |
146 | __check_hi, __check_ls, __check_ge, __check_lt, |
147 | __check_gt, __check_le, __check_al, __check_al |
148 | }; |
149 | |
150 | int show_unhandled_signals = 0; |
151 | |
152 | static void dump_kernel_instr(const char *lvl, struct pt_regs *regs) |
153 | { |
154 | unsigned long addr = instruction_pointer(regs); |
155 | char str[sizeof("00000000 " ) * 5 + 2 + 1], *p = str; |
156 | int i; |
157 | |
158 | if (user_mode(regs)) |
159 | return; |
160 | |
161 | for (i = -4; i < 1; i++) { |
162 | unsigned int val, bad; |
163 | |
164 | bad = aarch64_insn_read(&((u32 *)addr)[i], &val); |
165 | |
166 | if (!bad) |
167 | p += sprintf(buf: p, fmt: i == 0 ? "(%08x) " : "%08x " , val); |
168 | else |
169 | p += sprintf(buf: p, fmt: i == 0 ? "(????????) " : "???????? " ); |
170 | } |
171 | |
172 | printk("%sCode: %s\n" , lvl, str); |
173 | } |
174 | |
/* Config-description strings appended to the oops banner in __die(). */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"
184 | |
185 | static int __die(const char *str, long err, struct pt_regs *regs) |
186 | { |
187 | static int die_counter; |
188 | int ret; |
189 | |
190 | pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n" , |
191 | str, err, ++die_counter); |
192 | |
193 | /* trap and error numbers are mostly meaningless on ARM */ |
194 | ret = notify_die(val: DIE_OOPS, str, regs, err, trap: 0, SIGSEGV); |
195 | if (ret == NOTIFY_STOP) |
196 | return ret; |
197 | |
198 | print_modules(); |
199 | show_regs(regs); |
200 | |
201 | dump_kernel_instr(KERN_EMERG, regs); |
202 | |
203 | return ret; |
204 | } |
205 | |
206 | static DEFINE_RAW_SPINLOCK(die_lock); |
207 | |
208 | /* |
209 | * This function is protected against re-entrancy. |
210 | */ |
211 | void die(const char *str, struct pt_regs *regs, long err) |
212 | { |
213 | int ret; |
214 | unsigned long flags; |
215 | |
216 | raw_spin_lock_irqsave(&die_lock, flags); |
217 | |
218 | oops_enter(); |
219 | |
220 | console_verbose(); |
221 | bust_spinlocks(yes: 1); |
222 | ret = __die(str, err, regs); |
223 | |
224 | if (regs && kexec_should_crash(current)) |
225 | crash_kexec(regs); |
226 | |
227 | bust_spinlocks(yes: 0); |
228 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); |
229 | oops_exit(); |
230 | |
231 | if (in_interrupt()) |
232 | panic(fmt: "%s: Fatal exception in interrupt" , str); |
233 | if (panic_on_oops) |
234 | panic(fmt: "%s: Fatal exception" , str); |
235 | |
236 | raw_spin_unlock_irqrestore(&die_lock, flags); |
237 | |
238 | if (ret != NOTIFY_STOP) |
239 | make_task_dead(SIGSEGV); |
240 | } |
241 | |
242 | static void arm64_show_signal(int signo, const char *str) |
243 | { |
244 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, |
245 | DEFAULT_RATELIMIT_BURST); |
246 | struct task_struct *tsk = current; |
247 | unsigned long esr = tsk->thread.fault_code; |
248 | struct pt_regs *regs = task_pt_regs(tsk); |
249 | |
250 | /* Leave if the signal won't be shown */ |
251 | if (!show_unhandled_signals || |
252 | !unhandled_signal(tsk, sig: signo) || |
253 | !__ratelimit(&rs)) |
254 | return; |
255 | |
256 | pr_info("%s[%d]: unhandled exception: " , tsk->comm, task_pid_nr(tsk)); |
257 | if (esr) |
258 | pr_cont("%s, ESR 0x%016lx, " , esr_get_class_string(esr), esr); |
259 | |
260 | pr_cont("%s" , str); |
261 | print_vma_addr(KERN_CONT " in " , rip: regs->pc); |
262 | pr_cont("\n" ); |
263 | __show_regs(regs); |
264 | } |
265 | |
266 | void arm64_force_sig_fault(int signo, int code, unsigned long far, |
267 | const char *str) |
268 | { |
269 | arm64_show_signal(signo, str); |
270 | if (signo == SIGKILL) |
271 | force_sig(SIGKILL); |
272 | else |
273 | force_sig_fault(sig: signo, code, addr: (void __user *)far); |
274 | } |
275 | |
/*
 * Log (rate-limited) and deliver SIGBUS with memory-failure siginfo
 * (fault address @far, affected size 2^@lsb) to the current task.
 */
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}
282 | |
283 | void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, |
284 | const char *str) |
285 | { |
286 | arm64_show_signal(SIGTRAP, str); |
287 | force_sig_ptrace_errno_trap(errno, addr: (void __user *)far); |
288 | } |
289 | |
290 | void arm64_notify_die(const char *str, struct pt_regs *regs, |
291 | int signo, int sicode, unsigned long far, |
292 | unsigned long err) |
293 | { |
294 | if (user_mode(regs)) { |
295 | WARN_ON(regs != current_pt_regs()); |
296 | current->thread.fault_address = 0; |
297 | current->thread.fault_code = err; |
298 | |
299 | arm64_force_sig_fault(signo, code: sicode, far, str); |
300 | } else { |
301 | die(str, regs, err); |
302 | } |
303 | } |
304 | |
305 | #ifdef CONFIG_COMPAT |
/* The AArch32 IT[7:0] state is split across two PSTATE fields. */
#define PSTATE_IT_1_0_SHIFT 25
#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT 10
#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)

/* Reassemble the 8-bit IT state from its two split PSTATE fields. */
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}
320 | |
321 | static void compat_set_it_state(struct pt_regs *regs, u32 it) |
322 | { |
323 | u32 pstate_it; |
324 | |
325 | pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK; |
326 | pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK; |
327 | |
328 | regs->pstate &= ~PSR_AA32_IT_MASK; |
329 | regs->pstate |= pstate_it; |
330 | } |
331 | |
/*
 * Advance the Thumb IT (If-Then) block state after emulating/skipping an
 * instruction, mirroring what the hardware would do on retirement.
 */
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
/* Without AArch32 compat there is no IT state to maintain. */
static void advance_itstate(struct pt_regs *regs)
{
}
#endif
359 | |
/*
 * Advance the PC past a faulting/emulated instruction of @size bytes and
 * keep the dependent PSTATE bookkeeping (single-step, IT state, BTYPE)
 * consistent with the skip.
 */
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
376 | |
377 | static int user_insn_read(struct pt_regs *regs, u32 *insnp) |
378 | { |
379 | u32 instr; |
380 | unsigned long pc = instruction_pointer(regs); |
381 | |
382 | if (compat_thumb_mode(regs)) { |
383 | /* 16-bit Thumb instruction */ |
384 | __le16 instr_le; |
385 | if (get_user(instr_le, (__le16 __user *)pc)) |
386 | return -EFAULT; |
387 | instr = le16_to_cpu(instr_le); |
388 | if (aarch32_insn_is_wide(instr)) { |
389 | u32 instr2; |
390 | |
391 | if (get_user(instr_le, (__le16 __user *)(pc + 2))) |
392 | return -EFAULT; |
393 | instr2 = le16_to_cpu(instr_le); |
394 | instr = (instr << 16) | instr2; |
395 | } |
396 | } else { |
397 | /* 32-bit ARM instruction */ |
398 | __le32 instr_le; |
399 | if (get_user(instr_le, (__le32 __user *)pc)) |
400 | return -EFAULT; |
401 | instr = le32_to_cpu(instr_le); |
402 | } |
403 | |
404 | *insnp = instr; |
405 | return 0; |
406 | } |
407 | |
408 | void force_signal_inject(int signal, int code, unsigned long address, unsigned long err) |
409 | { |
410 | const char *desc; |
411 | struct pt_regs *regs = current_pt_regs(); |
412 | |
413 | if (WARN_ON(!user_mode(regs))) |
414 | return; |
415 | |
416 | switch (signal) { |
417 | case SIGILL: |
418 | desc = "undefined instruction" ; |
419 | break; |
420 | case SIGSEGV: |
421 | desc = "illegal memory access" ; |
422 | break; |
423 | default: |
424 | desc = "unknown or unrecoverable error" ; |
425 | break; |
426 | } |
427 | |
428 | /* Force signals we don't understand to SIGKILL */ |
429 | if (WARN_ON(signal != SIGKILL && |
430 | siginfo_layout(signal, code) != SIL_FAULT)) { |
431 | signal = SIGKILL; |
432 | } |
433 | |
434 | arm64_notify_die(str: desc, regs, signo: signal, sicode: code, far: address, err); |
435 | } |
436 | |
437 | /* |
438 | * Set up process info to signal segmentation fault - called on access error. |
439 | */ |
440 | void arm64_notify_segfault(unsigned long addr) |
441 | { |
442 | int code; |
443 | |
444 | mmap_read_lock(current->mm); |
445 | if (find_vma(current->mm, untagged_addr(addr)) == NULL) |
446 | code = SEGV_MAPERR; |
447 | else |
448 | code = SEGV_ACCERR; |
449 | mmap_read_unlock(current->mm); |
450 | |
451 | force_signal_inject(SIGSEGV, code, address: addr, err: 0); |
452 | } |
453 | |
454 | void do_el0_undef(struct pt_regs *regs, unsigned long esr) |
455 | { |
456 | u32 insn; |
457 | |
458 | /* check for AArch32 breakpoint instructions */ |
459 | if (!aarch32_break_handler(regs)) |
460 | return; |
461 | |
462 | if (user_insn_read(regs, insnp: &insn)) |
463 | goto out_err; |
464 | |
465 | if (try_emulate_mrs(regs, insn)) |
466 | return; |
467 | |
468 | if (try_emulate_armv8_deprecated(regs, insn)) |
469 | return; |
470 | |
471 | out_err: |
472 | force_signal_inject(SIGILL, ILL_ILLOPC, address: regs->pc, err: 0); |
473 | } |
474 | |
475 | void do_el1_undef(struct pt_regs *regs, unsigned long esr) |
476 | { |
477 | u32 insn; |
478 | |
479 | if (aarch64_insn_read((void *)regs->pc, &insn)) |
480 | goto out_err; |
481 | |
482 | if (try_emulate_el1_ssbs(regs, insn)) |
483 | return; |
484 | |
485 | out_err: |
486 | die(str: "Oops - Undefined instruction" , regs, err: esr); |
487 | } |
488 | |
489 | void do_el0_bti(struct pt_regs *regs) |
490 | { |
491 | force_signal_inject(SIGILL, ILL_ILLOPC, address: regs->pc, err: 0); |
492 | } |
493 | |
494 | void do_el1_bti(struct pt_regs *regs, unsigned long esr) |
495 | { |
496 | if (efi_runtime_fixup_exception(regs, "BTI violation" )) { |
497 | regs->pstate &= ~PSR_BTYPE_MASK; |
498 | return; |
499 | } |
500 | die(str: "Oops - BTI" , regs, err: esr); |
501 | } |
502 | |
503 | void do_el0_fpac(struct pt_regs *regs, unsigned long esr) |
504 | { |
505 | force_signal_inject(SIGILL, ILL_ILLOPN, address: regs->pc, err: esr); |
506 | } |
507 | |
/*
 * Unexpected FPAC exception in the kernel: kill the task before it
 * does any more harm.
 *
 * Fix: removed IDE inlay-hint labels ("str:", "err:") from die().
 */
void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	die("Oops - FPAC", regs, esr);
}
516 | |
/*
 * EL0 memory-copy/set (MOPS) exception: reset the operation's registers so
 * userspace restarts the sequence from its prologue instruction.
 */
void do_el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_mops_reset_regs(&regs->user_regs, esr);

	/*
	 * If single stepping then finish the step before executing the
	 * prologue instruction.
	 */
	user_fastforward_single_step(current);
}
527 | |
/*
 * Run a single cache-maintenance instruction on a user address with the
 * uaccess window open.  Sets @res to 0 on success or -EFAULT if the address
 * is outside the user range or the instruction faults (the extable entry
 * at label 1 redirects a fault to label 2 with %w0 = -EFAULT).
 */
#define __user_cache_maint(insn, address, res) \
	if (address >= TASK_SIZE_MAX) { \
		res = -EFAULT; \
	} else { \
		uaccess_ttbr0_enable(); \
		asm volatile ( \
			"1: " insn ", %1\n" \
			" mov %w0, #0\n" \
			"2:\n" \
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
			: "=r" (res) \
			: "r" (address)); \
		uaccess_ttbr0_disable(); \
	}
542 | |
543 | static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs) |
544 | { |
545 | unsigned long tagged_address, address; |
546 | int rt = ESR_ELx_SYS64_ISS_RT(esr); |
547 | int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; |
548 | int ret = 0; |
549 | |
550 | tagged_address = pt_regs_read_reg(regs, rt); |
551 | address = untagged_addr(tagged_address); |
552 | |
553 | switch (crm) { |
554 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ |
555 | __user_cache_maint("dc civac" , address, ret); |
556 | break; |
557 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ |
558 | __user_cache_maint("dc civac" , address, ret); |
559 | break; |
560 | case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */ |
561 | __user_cache_maint("sys 3, c7, c13, 1" , address, ret); |
562 | break; |
563 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */ |
564 | __user_cache_maint("sys 3, c7, c12, 1" , address, ret); |
565 | break; |
566 | case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */ |
567 | __user_cache_maint("dc civac" , address, ret); |
568 | break; |
569 | case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */ |
570 | __user_cache_maint("ic ivau" , address, ret); |
571 | break; |
572 | default: |
573 | force_signal_inject(SIGILL, ILL_ILLOPC, address: regs->pc, err: 0); |
574 | return; |
575 | } |
576 | |
577 | if (ret) |
578 | arm64_notify_segfault(addr: tagged_address); |
579 | else |
580 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
581 | } |
582 | |
/*
 * Emulate a trapped EL0 read of CTR_EL0, returning the sanitised user view
 * of the register and skipping the trapped instruction.
 */
static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
601 | |
602 | static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs) |
603 | { |
604 | int rt = ESR_ELx_SYS64_ISS_RT(esr); |
605 | |
606 | pt_regs_write_reg(regs, rt, arch_timer_read_counter()); |
607 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
608 | } |
609 | |
610 | static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) |
611 | { |
612 | int rt = ESR_ELx_SYS64_ISS_RT(esr); |
613 | |
614 | pt_regs_write_reg(regs, rt, arch_timer_get_rate()); |
615 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
616 | } |
617 | |
618 | static void mrs_handler(unsigned long esr, struct pt_regs *regs) |
619 | { |
620 | u32 sysreg, rt; |
621 | |
622 | rt = ESR_ELx_SYS64_ISS_RT(esr); |
623 | sysreg = esr_sys64_to_sysreg(esr); |
624 | |
625 | if (do_emulate_mrs(regs, sysreg, rt) != 0) |
626 | force_signal_inject(SIGILL, ILL_ILLOPC, address: regs->pc, err: 0); |
627 | } |
628 | |
/* A trapped EL0 WFI is simply skipped: the wait is a no-op hint. */
static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
633 | |
/* An ESR match rule mapping a trapped system instruction to its emulator. */
struct sys64_hook {
	unsigned long esr_mask;		/* bits of the ESR to compare */
	unsigned long esr_val;		/* value the masked ESR must equal */
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

/* EL0 trapped-SYS emulation table, scanned in order by do_el0_sys(). */
static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
684 | |
685 | #ifdef CONFIG_COMPAT |
/*
 * Decide whether a trapped AArch32 CP15 access would actually execute,
 * i.e. whether its condition (from the ESR, or from the IT state for a
 * T32 instruction trapped without a valid condition field) passes.
 */
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}
705 | |
706 | static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs) |
707 | { |
708 | int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT; |
709 | |
710 | pt_regs_write_reg(regs, reg, arch_timer_get_rate()); |
711 | arm64_skip_faulting_instruction(regs, size: 4); |
712 | } |
713 | |
/* Emulation table for trapped 32-bit CP15 MCR/MRC accesses from EL0. */
static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};
722 | |
723 | static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs) |
724 | { |
725 | int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT; |
726 | int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT; |
727 | u64 val = arch_timer_read_counter(); |
728 | |
729 | pt_regs_write_reg(regs, rt, lower_32_bits(val)); |
730 | pt_regs_write_reg(regs, rt2, upper_32_bits(val)); |
731 | arm64_skip_faulting_instruction(regs, size: 4); |
732 | } |
733 | |
/* Emulation table for trapped 64-bit CP15 MCRR/MRRC accesses from EL0. */
static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
747 | |
748 | void do_el0_cp15(unsigned long esr, struct pt_regs *regs) |
749 | { |
750 | const struct sys64_hook *hook, *hook_base; |
751 | |
752 | if (!cp15_cond_valid(esr, regs)) { |
753 | /* |
754 | * There is no T16 variant of a CP access, so we |
755 | * always advance PC by 4 bytes. |
756 | */ |
757 | arm64_skip_faulting_instruction(regs, size: 4); |
758 | return; |
759 | } |
760 | |
761 | switch (ESR_ELx_EC(esr)) { |
762 | case ESR_ELx_EC_CP15_32: |
763 | hook_base = cp15_32_hooks; |
764 | break; |
765 | case ESR_ELx_EC_CP15_64: |
766 | hook_base = cp15_64_hooks; |
767 | break; |
768 | default: |
769 | do_el0_undef(regs, esr); |
770 | return; |
771 | } |
772 | |
773 | for (hook = hook_base; hook->handler; hook++) |
774 | if ((hook->esr_mask & esr) == hook->esr_val) { |
775 | hook->handler(esr, regs); |
776 | return; |
777 | } |
778 | |
779 | /* |
780 | * New cp15 instructions may previously have been undefined at |
781 | * EL0. Fall back to our usual undefined instruction handler |
782 | * so that we handle these consistently. |
783 | */ |
784 | do_el0_undef(regs, esr); |
785 | } |
786 | #endif |
787 | |
788 | void do_el0_sys(unsigned long esr, struct pt_regs *regs) |
789 | { |
790 | const struct sys64_hook *hook; |
791 | |
792 | for (hook = sys64_hooks; hook->handler; hook++) |
793 | if ((hook->esr_mask & esr) == hook->esr_val) { |
794 | hook->handler(esr, regs); |
795 | return; |
796 | } |
797 | |
798 | /* |
799 | * New SYS instructions may previously have been undefined at EL0. Fall |
800 | * back to our usual undefined instruction handler so that we handle |
801 | * these consistently. |
802 | */ |
803 | do_el0_undef(regs, esr); |
804 | } |
805 | |
806 | static const char *esr_class_str[] = { |
807 | [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC" , |
808 | [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized" , |
809 | [ESR_ELx_EC_WFx] = "WFI/WFE" , |
810 | [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC" , |
811 | [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC" , |
812 | [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC" , |
813 | [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC" , |
814 | [ESR_ELx_EC_FP_ASIMD] = "ASIMD" , |
815 | [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS" , |
816 | [ESR_ELx_EC_PAC] = "PAC" , |
817 | [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC" , |
818 | [ESR_ELx_EC_BTI] = "BTI" , |
819 | [ESR_ELx_EC_ILL] = "PSTATE.IL" , |
820 | [ESR_ELx_EC_SVC32] = "SVC (AArch32)" , |
821 | [ESR_ELx_EC_HVC32] = "HVC (AArch32)" , |
822 | [ESR_ELx_EC_SMC32] = "SMC (AArch32)" , |
823 | [ESR_ELx_EC_SVC64] = "SVC (AArch64)" , |
824 | [ESR_ELx_EC_HVC64] = "HVC (AArch64)" , |
825 | [ESR_ELx_EC_SMC64] = "SMC (AArch64)" , |
826 | [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)" , |
827 | [ESR_ELx_EC_SVE] = "SVE" , |
828 | [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB" , |
829 | [ESR_ELx_EC_FPAC] = "FPAC" , |
830 | [ESR_ELx_EC_SME] = "SME" , |
831 | [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF" , |
832 | [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)" , |
833 | [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)" , |
834 | [ESR_ELx_EC_PC_ALIGN] = "PC Alignment" , |
835 | [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)" , |
836 | [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)" , |
837 | [ESR_ELx_EC_SP_ALIGN] = "SP Alignment" , |
838 | [ESR_ELx_EC_MOPS] = "MOPS" , |
839 | [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)" , |
840 | [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)" , |
841 | [ESR_ELx_EC_SERROR] = "SError" , |
842 | [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)" , |
843 | [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)" , |
844 | [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)" , |
845 | [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)" , |
846 | [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)" , |
847 | [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)" , |
848 | [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)" , |
849 | [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)" , |
850 | [ESR_ELx_EC_BRK64] = "BRK (AArch64)" , |
851 | }; |
852 | |
/* Return the human-readable name of @esr's exception class. */
const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}
857 | |
858 | /* |
859 | * bad_el0_sync handles unexpected, but potentially recoverable synchronous |
860 | * exceptions taken from EL0. |
861 | */ |
862 | void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr) |
863 | { |
864 | unsigned long pc = instruction_pointer(regs); |
865 | |
866 | current->thread.fault_address = 0; |
867 | current->thread.fault_code = esr; |
868 | |
869 | arm64_force_sig_fault(SIGILL, ILL_ILLOPC, far: pc, |
870 | str: "Bad EL0 synchronous exception" ); |
871 | } |
872 | |
873 | #ifdef CONFIG_VMAP_STACK |
874 | |
875 | DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) |
876 | __aligned(16); |
877 | |
878 | void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) |
879 | { |
880 | unsigned long tsk_stk = (unsigned long)current->stack; |
881 | unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); |
882 | unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); |
883 | |
884 | console_verbose(); |
885 | pr_emerg("Insufficient stack space to handle exception!" ); |
886 | |
887 | pr_emerg("ESR: 0x%016lx -- %s\n" , esr, esr_get_class_string(esr)); |
888 | pr_emerg("FAR: 0x%016lx\n" , far); |
889 | |
890 | pr_emerg("Task stack: [0x%016lx..0x%016lx]\n" , |
891 | tsk_stk, tsk_stk + THREAD_SIZE); |
892 | pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n" , |
893 | irq_stk, irq_stk + IRQ_STACK_SIZE); |
894 | pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n" , |
895 | ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE); |
896 | |
897 | __show_regs(regs); |
898 | |
899 | /* |
900 | * We use nmi_panic to limit the potential for recusive overflows, and |
901 | * to get a better stack trace. |
902 | */ |
903 | nmi_panic(NULL, msg: "kernel stack overflow" ); |
904 | cpu_park_loop(); |
905 | } |
906 | #endif |
907 | |
908 | void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr) |
909 | { |
910 | console_verbose(); |
911 | |
912 | pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n" , |
913 | smp_processor_id(), esr, esr_get_class_string(esr)); |
914 | if (regs) |
915 | __show_regs(regs); |
916 | |
917 | nmi_panic(regs, msg: "Asynchronous SError Interrupt" ); |
918 | |
919 | cpu_park_loop(); |
920 | } |
921 | |
/*
 * Classify a RAS SError by its severity field: returns false if the CPU can
 * continue, true if the error is fatal to the current context, and panics
 * directly (arm64_serror_panic() does not return) when the error may have
 * propagated silently.
 */
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}
953 | |
/*
 * SError entry point: panic unless this is a RAS error we can classify as
 * non-fatal.
 */
void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr))
		arm64_serror_panic(regs, esr);
	else if (arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}
960 | |
/* GENERIC_BUG traps */
#ifdef CONFIG_GENERIC_BUG
/* Always returns 1: every BRK #BUG_BRK_IMM site is a candidate BUG. */
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}
#endif
975 | |
976 | static int bug_handler(struct pt_regs *regs, unsigned long esr) |
977 | { |
978 | switch (report_bug(bug_addr: regs->pc, regs)) { |
979 | case BUG_TRAP_TYPE_BUG: |
980 | die(str: "Oops - BUG" , regs, err: esr); |
981 | break; |
982 | |
983 | case BUG_TRAP_TYPE_WARN: |
984 | break; |
985 | |
986 | default: |
987 | /* unknown/unrecognised bug trap type */ |
988 | return DBG_HOOK_ERROR; |
989 | } |
990 | |
991 | /* If thread survives, skip over the BUG instruction and continue: */ |
992 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
993 | return DBG_HOOK_HANDLED; |
994 | } |
995 | |
/* Registers bug_handler() for BRK instructions carrying BUG_BRK_IMM. */
static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};
1000 | |
1001 | #ifdef CONFIG_CFI_CLANG |
1002 | static int cfi_handler(struct pt_regs *regs, unsigned long esr) |
1003 | { |
1004 | unsigned long target; |
1005 | u32 type; |
1006 | |
1007 | target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr)); |
1008 | type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr)); |
1009 | |
1010 | switch (report_cfi_failure(regs, regs->pc, &target, type)) { |
1011 | case BUG_TRAP_TYPE_BUG: |
1012 | die("Oops - CFI" , regs, esr); |
1013 | break; |
1014 | |
1015 | case BUG_TRAP_TYPE_WARN: |
1016 | break; |
1017 | |
1018 | default: |
1019 | return DBG_HOOK_ERROR; |
1020 | } |
1021 | |
1022 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
1023 | return DBG_HOOK_HANDLED; |
1024 | } |
1025 | |
/*
 * CFI traps use a range of immediates (base plus encoded register
 * fields), hence the mask in addition to the base immediate.
 */
static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
1031 | #endif /* CONFIG_CFI_CLANG */ |
1032 | |
1033 | static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr) |
1034 | { |
1035 | pr_err("%s generated an invalid instruction at %pS!\n" , |
1036 | "Kernel text patching" , |
1037 | (void *)instruction_pointer(regs)); |
1038 | |
1039 | /* We cannot handle this */ |
1040 | return DBG_HOOK_ERROR; |
1041 | } |
1042 | |
/* Kernel break hook for the reserved text-patching fault immediate. */
static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};
1047 | |
1048 | #ifdef CONFIG_KASAN_SW_TAGS |
1049 | |
/* Layout of the KASAN brk immediate: recover flag, write flag, log2 size. */
#define KASAN_ESR_RECOVER 0x20	/* report is recoverable; continue afterwards */
#define KASAN_ESR_WRITE 0x10	/* faulting access was a write */
#define KASAN_ESR_SIZE_MASK 0x0f	/* low nibble holds log2 of access size */
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
1054 | |
1055 | static int kasan_handler(struct pt_regs *regs, unsigned long esr) |
1056 | { |
1057 | bool recover = esr & KASAN_ESR_RECOVER; |
1058 | bool write = esr & KASAN_ESR_WRITE; |
1059 | size_t size = KASAN_ESR_SIZE(esr); |
1060 | void *addr = (void *)regs->regs[0]; |
1061 | u64 pc = regs->pc; |
1062 | |
1063 | kasan_report(addr, size, write, pc); |
1064 | |
1065 | /* |
1066 | * The instrumentation allows to control whether we can proceed after |
1067 | * a crash was detected. This is done by passing the -recover flag to |
1068 | * the compiler. Disabling recovery allows to generate more compact |
1069 | * code. |
1070 | * |
1071 | * Unfortunately disabling recovery doesn't work for the kernel right |
1072 | * now. KASAN reporting is disabled in some contexts (for example when |
1073 | * the allocator accesses slab object metadata; this is controlled by |
1074 | * current->kasan_depth). All these accesses are detected by the tool, |
1075 | * even though the reports for them are not printed. |
1076 | * |
1077 | * This is something that might be fixed at some point in the future. |
1078 | */ |
1079 | if (!recover) |
1080 | die("Oops - KASAN" , regs, esr); |
1081 | |
1082 | /* If thread survives, skip over the brk instruction and continue: */ |
1083 | arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
1084 | return DBG_HOOK_HANDLED; |
1085 | } |
1086 | |
/* KASAN traps use a range of immediates (flags + size nibble), hence the mask. */
static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
1092 | #endif |
1093 | |
1094 | #ifdef CONFIG_UBSAN_TRAP |
1095 | static int ubsan_handler(struct pt_regs *regs, unsigned long esr) |
1096 | { |
1097 | die(report_ubsan_failure(regs, esr & UBSAN_BRK_MASK), regs, esr); |
1098 | return DBG_HOOK_HANDLED; |
1099 | } |
1100 | |
/* UBSAN traps encode the check type in the immediate, hence the mask. */
static struct break_hook ubsan_break_hook = {
	.fn = ubsan_handler,
	.imm = UBSAN_BRK_IMM,
	.mask = UBSAN_BRK_MASK,
};
1106 | #endif |
1107 | |
/*
 * Extract the BRK immediate ("comment") field from an ESR value.
 * NOTE: the macro name was missing ("#define (esr) ..."), which is
 * invalid preprocessor syntax; the callers below use esr_comment().
 */
#define esr_comment(esr) ((esr) & ESR_ELx_BRK64_ISS_COMMENT_MASK)
1109 | |
1110 | /* |
1111 | * Initial handler for AArch64 BRK exceptions |
1112 | * This handler only used until debug_traps_init(). |
1113 | */ |
1114 | int __init early_brk64(unsigned long addr, unsigned long esr, |
1115 | struct pt_regs *regs) |
1116 | { |
1117 | #ifdef CONFIG_CFI_CLANG |
1118 | if ((esr_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE) |
1119 | return cfi_handler(regs, esr) != DBG_HOOK_HANDLED; |
1120 | #endif |
1121 | #ifdef CONFIG_KASAN_SW_TAGS |
1122 | if ((esr_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM) |
1123 | return kasan_handler(regs, esr) != DBG_HOOK_HANDLED; |
1124 | #endif |
1125 | #ifdef CONFIG_UBSAN_TRAP |
1126 | if ((esr_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM) |
1127 | return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED; |
1128 | #endif |
1129 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; |
1130 | } |
1131 | |
/*
 * Register the kernel break hooks for BUG, CFI, text-patching faults,
 * KASAN and UBSAN traps, then hand BRK handling over to the debug
 * monitors (replacing early_brk64()).
 */
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif
	debug_traps_init();
}
1147 | |