/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
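	/*
	 * A worked example of the add/sub juggling above, with sp = S and
	 * x0 = V on entry: "add sp, sp, x0" gives sp' = S + V, and
	 * "sub x0, sp, x0" yields x0' = S, the decremented SP, which tbnz
	 * can then test without any GPR having been clobbered. If bit
	 * THREAD_SHIFT is clear (no overflow), the last two subs recover
	 * x0'' = V and sp'' = S and the fast path continues. Values here
	 * are illustrative only.
	 */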
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer
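	/*
	 * Illustrative check, assuming a 4KiB OVERFLOW_STACK_SIZE: if the
	 * interrupted sp already lay on this CPU's overflow stack, its delta
	 * to the stack top is under 4KiB, so every bit outside
	 * (OVERFLOW_STACK_SIZE - 1) is clear and the tst/b.ne above falls
	 * through rather than declaring a bad stack.
	 */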

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

	.macro	tramp_alias, dst, sym
	.set	.Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
	movz	\dst, :abs_g2_s:.Lalias\@
	movk	\dst, :abs_g1_nc:.Lalias\@
	movk	\dst, :abs_g0_nc:.Lalias\@
	.endm
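	/*
	 * The movz/movk sequence above materialises a kernel VA in 16-bit
	 * chunks; :abs_g2_s: sign-extends from the top bit of the g2 group,
	 * so the uppermost 16 bits (all ones for a TTBR1 address) need no
	 * fourth instruction. As a purely hypothetical example,
	 * 0xffff800000001000 would be built as movz #0x8000 (g2, sign-
	 * extended to ones), movk #0x0000 (g1), movk #0x1000 (g0).
	 */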

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
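	/*
	 * Sketch of the firmware call this reduces to once the alternative
	 * is patched live (shown for illustration only; the conduit is
	 * chosen at boot):
	 *
	 *	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	 *	mov	w1, #1		// 1 = enable mitigation, 0 = disable
	 *	smc	#0		// or hvc #0, depending on the conduit
	 */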

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm
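	/*
	 * For illustration: the ubfx above extracts the 16-bit user tag
	 * exclude mask from mte_ctrl. If, hypothetically, a task excludes
	 * tags 0 and 15 (mask 0x8001), GCR_EL1 is programmed to
	 * 0x8001 | RRND, so IRG never generates those tags while RRND
	 * selects the hardware's random tag generation.
	 */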

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\el == 0
	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
	.endif
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * Create a metadata frame record. The unwinder will use this to
	 * identify and unwind exception boundaries.
	 */
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.if \el == 0
	mov	x0, #FRAME_META_TYPE_FINAL
	.else
	mov	x0, #FRAME_META_TYPE_PT_REGS
	.endif
	str	x0, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME
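	/*
	 * The record built above looks roughly like this (offsets are
	 * symbolic, from asm-offsets):
	 *
	 *	sp + S_STACKFRAME:	{ fp = 0, lr = 0 }
	 *	sp + S_STACKFRAME_TYPE:	FRAME_META_TYPE_FINAL (from EL0) or
	 *				FRAME_META_TYPE_PT_REGS (from EL1)
	 *
	 * x29 then points at this record, so a plain frame-pointer walk
	 * either terminates here or hops the exception boundary.
	 */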

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_save\@
alternative_else_nop_endif

	mrs_s	x20, SYS_ICC_PMR_EL1
	str	w20, [sp, #S_PMR]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20

.Lskip_pmr_save\@:
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
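	/*
	 * For reference, the frame kernel_entry builds corresponds to a
	 * C-level structure along these lines (a simplified sketch; the
	 * real definition lives in <asm/ptrace.h> and the S_* offsets come
	 * from asm-offsets, so layout details here are illustrative):
	 *
	 *	struct pt_regs {
	 *		u64 regs[31];	// x0..x30, stored in 16-byte pairs
	 *		u64 sp;		// S_SP: aborted SP
	 *		u64 pc;		// S_PC: ELR_EL1
	 *		u64 pstate;	// SPSR_EL1
	 *		...		// syscallno, pmr, stackframe, etc.
	 *	};
	 */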

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_restore\@
alternative_else_nop_endif

	ldr	w20, [sp, #S_PMR]
	msr_s	SYS_ICC_PMR_EL1, x20

	/* Ensure priority change is seen by redistributor */
alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
	dsb	sy
alternative_else_nop_endif

.Lskip_pmr_restore\@:
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0

	msr	far_el1, x29

	ldr_this_cpu	x30, this_cpu_vector, x29
	tramp_alias	x29, tramp_exit
	msr	vbar_el1, x30			// install vector table
	ldr	lr, [sp, #S_LR]			// restore x30
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	br	x29

.L_skip_tramp_exit_\@:
#endif
	.endif

	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	.if	\el == 0
	/* This must be after the last explicit memory access */
alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	tlbi	vale1, xzr
	dsb	nsh
alternative_else_nop_endif
	.else
	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
	.endif

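	/*
	 * Return to the interrupted context. The sb speculation barrier
	 * after the eret keeps the CPU from speculatively executing the
	 * fall-through instructions past the exception return.
	 */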
	eret
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)
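	/*
	 * Illustrative flow, assuming the reserved ASID is 0: the uaccess
	 * helpers park TTBR0_EL1 on a reserved table with ASID 0, so a zero
	 * ASID field above means TTBR0 access was already disabled and the
	 * emulated PAN bit stays set in the saved SPSR; a non-zero ASID
	 * means user tables were live, so PAN is cleared for the interrupted
	 * context and TTBR0 access is disabled for the kernel's duration.
	 */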

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)
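/*
 * Layout sanity check, for illustration: each kernel_ventry slot is
 * padded to 128 bytes (.align 7, enforced by the .org check in the
 * macro), and there are 16 of them, so the whole table is 2KB. The
 * .align 11 above provides the 2KB alignment that VBAR_EL1 requires.
 */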

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */


	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm
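/*
 * As an example, "entry_handler 0, t, 64, sync" expands to the body of
 * el0t_64_sync: it saves the register state via kernel_entry, points x0
 * at the pt_regs frame, calls the C handler el0t_64_sync_handler(regs),
 * and then branches to ret_to_user.
 */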

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RELOCATABLE
	ldr	\dst, .L__tramp_data_\var
	.ifndef	.L__tramp_data_\var
	.pushsection	".entry.tramp.rodata", "a", %progbits
	.align	3
.L__tramp_data_\var:
	.quad	\var
	.popsection
	.endif
#else
	/*
	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
	 * compile time constant (and hence not secret and not worth hiding).
	 *
	 * As statically allocated kernel code and data always live in the top
	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
	 * instruction to load the upper 16 bits (which must be 0xFFFF).
	 */
	movz	\dst, :abs_g2_s:\var
	movk	\dst, :abs_g1_nc:\var
	movk	\dst, :abs_g0_nc:\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3

	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry. It appears here because x30
	 * is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	adr_l	x30, vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm
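/*
 * Worked example of the return above: x30 holds the base of the real
 * vector table, and (1b - \vector_start) is this ventry's byte offset
 * within the 16-slot table. The extra +4 makes the ret land one
 * instruction past the slot's start, i.e. on the tpidrro_el0 cleanup
 * that kernel_ventry hides behind its initial branch for non-trampoline
 * entries. So for the 64-bit EL0 sync slot at offset 0x400, the ret
 * goes to vectors + 0x404.
 */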

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START_LOCAL(tramp_exit)
	tramp_unmap_kernel	x29
	mrs	x29, far_el1			// restore x29
	eret
	sb
SYM_CODE_END(tramp_exit)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load_current
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
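/*
 * The save/restore area above corresponds to a cpu_context layout along
 * these lines (a sketch; the real struct is in <asm/processor.h> and
 * THREAD_CPU_CONTEXT comes from asm-offsets):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23, x24;
 *		unsigned long x25, x26, x27, x28;
 *		unsigned long fp, sp, pc;	// x29, saved sp, return lr
 *	};
 */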

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	get_current_task x16
	scs_save x16
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17

	/* Move to the new stack and call the function there */
	add	sp, x16, #IRQ_STACK_SIZE
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
	scs_load_current
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel point to the previous stack/frame. */
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero
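	/*
	 * For illustration: bits [3:2] of the saved PSTATE hold the EL the
	 * event interrupted (e.g. 0x4 for EL1), in the same position that
	 * the CurrentEL register reports it, so the compare above zeroes
	 * the frame pointer and ELR whenever the interrupted context was
	 * not running at this exception level.
	 */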

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19		// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/*
	 * x0 = (x0 <= SDEI_EV_FAILED) ?
	 *	EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */