// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <linux/ubsan.h>
#include <linux/cfi.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/efi.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/text-patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

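/*
 * Condition-code helpers for emulated/trapped AArch32 instructions. The
 * NZCV flags live in the top bits of the saved PSTATE: N is bit 31, Z is
 * bit 30, C is bit 29 and V is bit 28 (PSR_{N,Z,C,V}_BIT).
 */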
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

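/*
 * The compound conditions below exploit that bit layout to avoid branches:
 * shifting PSTATE right by one aligns Z with C, shifting left by three
 * aligns V with N, and shifting left by one aligns Z with N, so each
 * condition reduces to a single bit test.
 */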
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

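/*
 * Dispatch table indexed by the 4-bit AArch32 condition field, used when
 * deciding whether a trapped conditional instruction would have executed
 * (see cp15_cond_valid() below).
 */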
/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

int show_unhandled_signals = 0;

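/*
 * Dump the instruction stream around a kernel fault: the four words before
 * the faulting instruction plus the faulting word itself, printed in
 * parentheses.
 */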
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else
			p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
	}

	printk("%sCode: %s\n", lvl, str);
}

#define S_SMP " SMP"

static int __die(const char *str, long err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %016lx [#%d]" S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, unsigned long far,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, (void __user *)far);
}

void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey)
{
	arm64_show_signal(SIGSEGV, str);
	force_sig_pkuerr((void __user *)far, pkey);
}

void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, (void __user *)far, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, (void __user *)far);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)
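
/*
 * The AArch32 ITSTATE is split across the saved PSTATE: IT[1:0] sit at
 * bits 26:25 and IT[7:2] at bits 15:10. The helpers below reassemble and
 * scatter the 8-bit value.
 */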
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

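/*
 * ITSTATE[7:5] hold the base condition and ITSTATE[4:0] the mask. As in
 * the ITAdvance() pseudocode of the Arm ARM, each completed instruction
 * shifts the mask left by one, and the whole state is wiped once
 * ITSTATE[2:0] reaches zero.
 */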
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}

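/*
 * Read the instruction that userspace trapped on. In Thumb mode, a wide
 * (32-bit) instruction is fetched as two separate halfword reads, since
 * the second halfword may sit on a different page.
 */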
static int user_insn_read(struct pt_regs *regs, u32 *insnp)
{
	u32 instr;
	unsigned long pc = instruction_pointer(regs);

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			return -EFAULT;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				return -EFAULT;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			return -EFAULT;
		instr = le32_to_cpu(instr_le);
	}

	*insnp = instr;
	return 0;
}

void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, untagged_addr(addr)) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}

void do_el0_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (user_insn_read(regs, &insn))
		goto out_err;

	if (try_emulate_mrs(regs, insn))
		return;

	if (try_emulate_armv8_deprecated(regs, insn))
		return;

out_err:
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_undef(struct pt_regs *regs, unsigned long esr)
{
	u32 insn;

	if (aarch64_insn_read((void *)regs->pc, &insn))
		goto out_err;

	if (try_emulate_el1_ssbs(regs, insn))
		return;

out_err:
	die("Oops - Undefined instruction", regs, esr);
}

void do_el0_bti(struct pt_regs *regs)
{
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

void do_el1_bti(struct pt_regs *regs, unsigned long esr)
{
	if (efi_runtime_fixup_exception(regs, "BTI violation")) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		return;
	}
	die("Oops - BTI", regs, esr);
}

void do_el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGSEGV, SEGV_CPERR, regs->pc, 0);
}

void do_el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	die("Oops - GCS", regs, esr);
}

void do_el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}

void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * Unexpected FPAC exception in the kernel: kill the task before it
	 * does any more harm.
	 */
	die("Oops - FPAC", regs, esr);
}

void do_el0_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_mops_reset_regs(&regs->user_regs, esr);

	/*
	 * If single stepping then finish the step before executing the
	 * prologue instruction.
	 */
	user_fastforward_single_step(current);
}

void do_el1_mops(struct pt_regs *regs, unsigned long esr)
{
	arm64_mops_reset_regs(&regs->user_regs, esr);

	kernel_fastforward_single_step(regs);
}

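/*
 * Perform a cache maintenance instruction on a user address. The exception
 * table fixup turns a fault on the address into an error in %w0 rather than
 * an unhandled kernel fault, and uaccess_ttbr0_enable() makes the user
 * mapping accessible on kernels using software TTBR0 PAN.
 */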
#define __user_cache_maint(insn, address, res)			\
	if (address >= TASK_SIZE_MAX) {				\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)	\
			: "=r" (res)				\
			: "r" (address));			\
		uaccess_ttbr0_disable();			\
	}

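/*
 * Handle EL0 cache maintenance instructions trapped when SCTLR_EL1.UCI is
 * clear. DC CVAU and DC CVAC are promoted to DC CIVAC: clean+invalidate is
 * strictly stronger, so this is architecturally safe.
 */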
static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance... */
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	if (test_thread_flag(TIF_TSC_SIGSEGV)) {
		force_sig(SIGSEGV);
	} else {
		int rt = ESR_ELx_SYS64_ISS_RT(esr);

		pt_regs_write_reg(regs, rt, arch_timer_read_counter());
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
}

static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	if (test_thread_flag(TIF_TSC_SIGSEGV)) {
		force_sig(SIGSEGV);
	} else {
		int rt = ESR_ELx_SYS64_ISS_RT(esr);

		pt_regs_write_reg(regs, rt, arch_timer_get_rate());
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	}
}

static void mrs_handler(unsigned long esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
	unsigned long esr_mask;
	unsigned long esr_val;
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};

#ifdef CONFIG_COMPAT
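/*
 * Decide whether a trapped conditional CP15 access would have passed its
 * condition check. When ESR_ELx.CV is clear the COND field is invalid, so
 * the condition is recovered from the PSTATE.IT bits instead; an empty IT
 * state means the instruction was unconditional.
 */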
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};

void do_el0_cp15(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_el0_undef(regs, esr);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_el0_undef(regs, esr);
}
#endif

void do_el0_sys(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_el0_undef(regs, esr);
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_SME]		= "SME",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_MOPS]		= "MOPS",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_GCS]		= "Guarded Control Stack",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

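/*
 * With vmap'd kernel stacks, an overflow is detected in the entry code,
 * which pivots onto this per-CPU overflow stack so that the report below
 * still has stack space to run on.
 */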
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}

void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}

/* GENERIC_BUG traps */
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer is
	 * trivial -- any spurious instances with no bug table entry will be
	 * rejected by report_bug() and passed back to the debug-monitors code,
	 * which handles them as a fatal unexpected debug exception.
	 */
	return 1;
}
#endif

static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};

#ifdef CONFIG_CFI_CLANG
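/*
 * With kCFI, a failed indirect-call type check traps via a BRK whose
 * immediate encodes which registers hold the branch target and the expected
 * type hash; decode both so that the report can name the target.
 */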
static int cfi_handler(struct pt_regs *regs, unsigned long esr)
{
	unsigned long target;
	u32 type;

	target = pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TARGET, esr));
	type = (u32)pt_regs_read_reg(regs, FIELD_GET(CFI_BRK_IMM_TYPE, esr));

	switch (report_cfi_failure(regs, regs->pc, &target, type)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - CFI", regs, esr);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		return DBG_HOOK_ERROR;
	}

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook cfi_break_hook = {
	.fn = cfi_handler,
	.imm = CFI_BRK_IMM_BASE,
	.mask = CFI_BRK_IMM_MASK,
};
#endif /* CONFIG_CFI_CLANG */

static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
	       "Kernel text patching",
	       (void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

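/*
 * For software tag-based KASAN, the compiler encodes the access details in
 * the BRK immediate: bit 5 flags a recoverable report, bit 4 a write, and
 * bits 3:0 hold log2 of the access size.
 */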
#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	void *addr = (void *)regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows us to control whether we can proceed
	 * after a crash was detected. This is done by passing the -recover
	 * flag to the compiler. Disabling recovery allows the compiler to
	 * generate more compact code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, esr);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif

#ifdef CONFIG_UBSAN_TRAP
static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
{
	die(report_ubsan_failure(esr & UBSAN_BRK_MASK), regs, esr);
	return DBG_HOOK_HANDLED;
}

static struct break_hook ubsan_break_hook = {
	.fn = ubsan_handler,
	.imm = UBSAN_BRK_IMM,
	.mask = UBSAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_CFI_CLANG
	if (esr_is_cfi_brk(esr))
		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
#ifdef CONFIG_UBSAN_TRAP
	if (esr_is_ubsan_brk(esr))
		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_CFI_CLANG
	register_kernel_break_hook(&cfi_break_hook);
#endif
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
#ifdef CONFIG_UBSAN_TRAP
	register_kernel_break_hook(&ubsan_break_hook);
#endif
	debug_traps_init();
}