// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/rseq.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>
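
/*
 * Per-CPU save slot for the kernel's DBCR0 value, stashed by
 * booke_load_dbcr0() before the user's debug context is loaded so it can be
 * restored later by the low-level entry code.
 */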
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif
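
/*
 * On Book3S-64 the exit path is normally restartable, so MSR[EE]/MSR[RI] can
 * be left on while exiting; this key is enabled when that restart scheme
 * can't be used and exit must hard-disable instead. Everywhere else, exit
 * always hard-disables.
 */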
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif

/*
 * Local irqs must be disabled on entry. Returns false if the caller must
 * re-enable them, check for new work, and try again.
 *
 * If restartable is true then EE/RI can be left on, because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	bool must_hard_disable = (exit_must_hard_disable() || !restartable);

	/* This must be done with RI=1 because tracing may touch vmaps */
	trace_hardirqs_on();

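	/*
	 * Hard-disable MSR[EE] and MSR[RI]: no further interrupts can be
	 * taken, and an NMI arriving now would be treated as unrecoverable.
	 */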
	if (must_hard_disable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* This pattern matches prep_irq_for_idle */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (must_hard_disable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}
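
/*
 * Sketch of the expected caller pattern (mirroring
 * interrupt_exit_user_prepare_main() below): on failure, toggle local irqs
 * to replay the pending interrupt, then retry the exit.
 *
 *	local_irq_disable();
 * again:
 *	...
 *	if (!prep_irq_for_enabled_exit(true)) {
 *		local_irq_enable();	// replays pending interrupts
 *		local_irq_disable();
 *		goto again;
 *	}
 */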

static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
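	/* DBSR bits are write-one-to-clear; clear any stale debug events. */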
	mtspr(SPRN_DBSR, -1);
#endif
}

static notrace void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);
	// EE in HV mode sets HSRRs like 0xea0
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

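	/* Interrupts in this list set HSRR0/1; all others set SRR0/1. */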
	switch (trap) {
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!READ_ONCE(*validp))
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		validp = &local_paca->srr_valid;
		if (!READ_ONCE(*validp))
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}
147 | |
148 | if (srr0 == regs->nip && srr1 == regs->msr) |
149 | return; |
150 | |
151 | /* |
152 | * A NMI / soft-NMI interrupt may have come in after we found |
153 | * srr_valid and before the SRRs are loaded. The interrupt then |
154 | * comes in and clobbers SRRs and clears srr_valid. Then we load |
155 | * the SRRs here and test them above and find they don't match. |
156 | * |
157 | * Test validity again after that, to catch such false positives. |
158 | * |
159 | * This test in general will have some window for false negatives |
160 | * and may not catch and fix all such cases if an NMI comes in |
161 | * later and clobbers SRRs without clearing srr_valid, but hopefully |
162 | * such things will get caught most of the time, statistically |
163 | * enough to be able to get a warning out. |
164 | */ |
165 | if (!READ_ONCE(*validp)) |
166 | return; |
167 | |
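	/* 'warned' races benignly between CPUs; data_race() silences KCSAN. */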
	if (!data_race(warned)) {
		data_race(warned = true);
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	WRITE_ONCE(*validp, 0); /* fixup */
#endif
}

static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

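	/*
	 * Handle pending user work with irqs enabled, then re-check the
	 * flags with irqs disabled so no new work can slip in before exit.
	 */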
again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * SIGPENDING must restore signal handler function
			 * argument GPRs, and some non-volatiles (e.g., r1).
			 * Restore all for now. This could be made lighter.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * If userspace MSR has all available FP bits set,
			 * then they are live and no need to restore. If not,
			 * it means the regs were given up and restore_math
			 * may decide to restore them (to avoid taking an FP
			 * fault).
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
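	/*
	 * If a soft-masked interrupt became pending, back out of user
	 * context: the enable/disable pair replays it, then retry the exit.
	 */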
	if (!prep_irq_for_enabled_exit(true)) {
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access locks last */
	kuap_user_restore(regs);

	return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPR registers and CTR and XER
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
					   struct pt_regs *regs,
					   long scv)
{
	unsigned long ti_flags;
	unsigned long ret = 0;
	bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

	CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap_assert_locked();

	regs->result = r3;

	/* Check whether the syscall is issued inside a restartable sequence */
	rseq_syscall(regs);

	ti_flags = read_thread_flags();

	if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
		if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
			r3 = -r3;
			regs->ccr |= 0x10000000; /* Set SO bit in CR */
		}
	}
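
	/*
	 * For example, a sc syscall failing with -EINVAL arrives here with
	 * r3 = (unsigned long)-22; userspace then sees r3 = 22 with CR0[SO]
	 * set. The scv ABI instead returns negative errno values directly,
	 * so it skips this fixup.
	 */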

	if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
		if (ti_flags & _TIF_RESTOREALL)
			ret = _TIF_RESTOREALL;
		else
			regs->gpr[3] = r3;
		clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
	} else {
		regs->gpr[3] = r3;
	}

	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
		do_syscall_trace_leave(regs);
		ret |= _TIF_RESTOREALL;
	}

	local_irq_disable();
	ret = interrupt_exit_user_prepare_main(ret, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * This is called when detecting a soft-pending interrupt as well as
	 * an alternate-return interrupt. So we can't just have the alternate
	 * return path clear SRR1[EE] and set PACA_IRQ_HARD_DIS (unless
	 * the soft-pending case were to fix things up as well). RI might be
	 * disabled, in which case it gets re-enabled by __hard_irq_disable().
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
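	/* Record the hard disable so the irq soft-mask state is reconciled. */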

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
#endif

notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * AMR can only have been unlocked if we interrupted the kernel.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	regs->exit_result = ret;
#endif

	return ret;
}

void preempt_schedule_irq(void);

notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
	unsigned long ret = 0;
	unsigned long kuap;
	bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

	if (regs_is_unrecoverable(regs))
		unrecoverable_exception(regs);
	/*
	 * CT_WARN_ON comes here via program_check_exception, so avoid
	 * recursion.
	 *
	 * Skip the assertion on PMIs on 64e to work around a problem caused
	 * by NMI PMIs incorrectly taking this interrupt return path; it's
	 * possible for this to hit after interrupt exit to user has switched
	 * context to user. See also the comment in the performance monitor
	 * handler in exceptions-64e.S
	 */
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
	    TRAP(regs) != INTERRUPT_PROGRAM &&
	    TRAP(regs) != INTERRUPT_PERFMON)
		CT_WARN_ON(ct_state() == CONTEXT_USER);

	kuap = kuap_get_and_assert_locked();

	local_irq_disable();

	if (!arch_irq_disabled_regs(regs)) {
		/* Returning to a kernel context with local irqs enabled. */
		WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
		if (IS_ENABLED(CONFIG_PREEMPT)) {
			/* Return to preemptible kernel context */
			if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
				if (preempt_count() == 0)
					preempt_schedule_irq();
			}
		}

		check_return_regs_valid(regs);

		/*
		 * Stack store exit can't be restarted because the interrupt
		 * stack frame might have been clobbered.
		 */
		if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
			/*
			 * Replay pending soft-masked interrupts now. Don't
			 * just local_irq_enable(); local_irq_disable(); because
			 * if we are returning from an asynchronous interrupt
			 * here, another one might hit after irqs are enabled,
			 * and it would exit via this same path allowing
			 * another to fire, and so on unbounded.
			 */
			hard_irq_disable();
			replay_soft_interrupts();
			/* Took an interrupt, may have more exit work to do. */
			goto again;
		}
#ifdef CONFIG_PPC64
		/*
		 * An interrupt may clear MSR[EE] and set this concurrently,
		 * but it will be marked pending and the exit will be retried.
		 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
		 * clear, until interrupt_exit_kernel_restart() calls
		 * hard_irq_disable(), which will set HARD_DIS again.
		 */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	} else {
		check_return_regs_valid(regs);

		if (unlikely(stack_store))
			__hard_EE_RI_disable();
#endif /* CONFIG_PPC64 */
	}

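	/*
	 * A nonzero return tells the exit assembly to complete the emulated
	 * stack store before tearing down the interrupt stack frame.
	 */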
	if (unlikely(stack_store)) {
		clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
		ret = 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	local_paca->tm_scratch = regs->msr;
#endif

	/*
	 * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
	 * mtmsr, which would cause Read-After-Write stalls. Hence, take the
	 * AMR value from the check above.
	 */
	kuap_kernel_restore(regs, kuap);

	return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
#endif