// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CT_STATE_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		local_irq_enable();

		if (thread_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
			schedule();

		if (thread_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
			clear_thread_flag(TIF_MTE_ASYNC_FAULT);
			send_sig_fault(SIGSEGV, SEGV_MTEAERR,
				       (void __user *)NULL, current);
		}

		if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			do_signal(regs);

		if (thread_flags & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		if (thread_flags & _TIF_FOREIGN_FPSTATE)
			fpsimd_restore_current_state();

		local_irq_disable();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_irq_disable();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

	local_daif_mask();

	lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode_prepare(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}
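
/*
 * Note: lockdep's view of the hardirq state is stashed in
 * regs->lockdep_hardirqs above because an NMI may be taken from a context
 * where lockdep considers IRQs enabled; arm64_exit_nmi() uses the saved value
 * to restore that view on the way out.
 */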

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif
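
/*
 * Note: with CONFIG_PREEMPT_DYNAMIC the static key above is expected to track
 * the preemption model selected at runtime (e.g. via the "preempt=" boot
 * parameter), so IRQ-return preemption can be switched without a rebuild;
 * otherwise the decision is fixed at build time by CONFIG_PREEMPTION.
 */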

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}
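
/*
 * Note: when an interrupt arrives while running on a task's thread stack,
 * do_interrupt_handler() runs the handler on the per-CPU IRQ stack via
 * call_on_irq_stack() so interrupt handling does not eat into the interrupted
 * task's stack; otherwise (e.g. already on the IRQ or overflow stack) the
 * handler is called in place.
 */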

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
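
/*
 * For example, UNHANDLED(el1t, 64, sync) expands to an el1t_64_sync_handler()
 * which panics with the description "64-bit el1t sync". This is how the stub
 * handlers for vectors that should never be taken are generated below.
 */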

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
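	/*
	 * Clearing PSTATE.D (the "#8" immediate selects the D bit) unmasks
	 * debug exceptions, so the pending single-step exception is taken
	 * around the isb() below and consumed by
	 * cortex_a76_erratum_1463225_debug_handler().
	 */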
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fpsimd_syscall_enter(void)
{
	/* Ensure PSTATE.SM is clear, but leave PSTATE.ZA as-is. */
	if (system_supports_sme())
		sme_smstop_sm();

	/*
	 * The CPU is not in streaming mode. If non-streaming SVE is not
	 * supported, there is no SVE state that needs to be discarded.
	 */
	if (!system_supports_sve())
		return;

	if (test_thread_flag(TIF_SVE)) {
		unsigned int sve_vq_minus_one;

		sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
		sve_flush_live(true, sve_vq_minus_one);
	}

	/*
	 * Any live non-FPSIMD SVE state has been zeroed. Allow
	 * fpsimd_save_user_state() to lazily discard SVE state until either
	 * the live state is unbound or fpsimd_syscall_exit() is called.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_FPSIMD);
}

static __always_inline void fpsimd_syscall_exit(void)
{
	if (!system_supports_sve())
		return;

	/*
	 * The current task's user FPSIMD/SVE/SME state is now bound to this
	 * CPU. The fpsimd_last_state.to_save value is either:
	 *
	 * - FP_STATE_FPSIMD, if the state has not been reloaded on this CPU
	 *   since fpsimd_syscall_enter().
	 *
	 * - FP_STATE_CURRENT, if the state has been reloaded on this CPU at
	 *   any point.
	 *
	 * Reset this to FP_STATE_CURRENT to stop lazy discarding.
	 */
	__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
}

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_gcs(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_mops(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el1_gcs(regs, esr);
		break;
	case ESR_ELx_EC_MOPS:
		el1_mops(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
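	/*
	 * Unmask Debug and SError exceptions while keeping IRQs and FIQs
	 * masked (DAIF_PROCCTX_NOIRQ) for the duration of interrupt handling.
	 */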
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_mops(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_gcs(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	fpsimd_syscall_enter();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc(regs);
	exit_to_user_mode(regs);
	fpsimd_syscall_exit();
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_MOPS:
		el0_mops(regs, esr);
		break;
	case ESR_ELx_EC_GCS:
		el0_gcs(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

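	/*
	 * Bit 55 of the PC distinguishes the TTBR1 (kernel) half of the
	 * address space even when address tags are in use; a user PC that
	 * looks like a kernel address warrants branch predictor hardening
	 * before the interrupt is handled.
	 */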
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	local_daif_restore(DAIF_PROCCTX);
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */