/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code and the
 * exception/interrupt return code for 32-bit PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
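/*
 * Finish the transfer to an interrupt handler when the interrupt came
 * from kernel mode: if a power-saving NAP/DOZE or SLEEP state was
 * interrupted, back out of it before running the handler.
 */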
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
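/*
 * Kernel Userspace Execution Prevention (KUEP) on book3s/32: the user
 * segments are made no-execute (SR_NX) while running in the kernel and
 * made executable again on return to user mode. The kuep_lock/kuep_unlock
 * macros below wrap these helpers and compile to nothing when KUEP is
 * not enabled.
 */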
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

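/*
 * System call entry, reached from the SYSCALL_ENTRY macro in head_32.h.
 * On entry: r1 points to the kernel stack frame, r9 holds the MSR at the
 * time of the syscall, r10 points to current's thread_struct and r11
 * holds the caller's (user) r1.
 */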
	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha	/* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
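	/*
	 * syscall_exit_prepare() returns non-zero in r3 when the full
	 * register set (non-volatile GPRs, CTR, XER) has to be restored
	 * before returning to user mode; that case is handled at label 3
	 * below.
	 */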
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

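/*
 * A newly forked child resumes here (its switch frame is set up by
 * copy_thread()): finish the scheduler bookkeeping, then return 0 to
 * user mode through the normal syscall exit path.
 */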
	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

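/*
 * A thread created in the kernel that is heading to user mode: call the
 * function in r14 with the argument in r15, then exit to user space
 * through the syscall return path with a return value of 0.
 */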
	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

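/*
 * First entry of a new kernel thread: call the thread function held in
 * r14 with the argument held in r15. The function must never return;
 * if it does, we trap on the BUG below.
 */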
	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We want BUG here, not WARN, because BUG
	 * will exit the process, which is what the kernel thread function
	 * should have done itself, and that gives some hope of the system
	 * continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

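/*
 * Fast return path for exceptions handled entirely in low-level assembly,
 * without going through the C interrupt exit code. On entry: r9 = MSR to
 * return with (SRR1), r11 points at the saved register area of the
 * exception frame, r12 = return address (SRR0).
 */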
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

	/*
	 * A nonrecoverable interrupt, and we do not even know which trap
	 * it is: record a trap number of -1 and panic.
	 */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl	interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

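	/*
	 * Get rid of any reservation the interrupted context may have
	 * established with lwarx: either kill it with a stwcx. to the
	 * stack, or, on CPUs that check the reservation address on stwcx.,
	 * replace it with a harmless reservation on the kernel stack so a
	 * stcx. in the interrupted code cannot succeed by accident.
	 */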
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

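	/*
	 * interrupt_exit_kernel_prepare() returns non-zero in r3 when the
	 * interrupted instruction was a stwu updating the stack pointer
	 * whose store has to be emulated on exit (see the "emulate stack
	 * store" path below); remember that in cr1.
	 */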
.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f		/* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)	/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
#ifdef CONFIG_40x
	b .	/* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						\
	/* avoid any possible TLB misses here by turning off MSR.DR; we	\
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						\
	mtmsr	r10;							\
	isync;								\
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

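/*
 * Common return path for the extra exception levels (critical, debug,
 * machine check). exc_lvl_srr0/exc_lvl_srr1 name the SPR pair holding the
 * return context and exc_lvl_rfi is the matching return-from-interrupt
 * instruction (rfci, rfdi or rfmci). Returns to user mode are redirected
 * to the normal interrupt_return path.
 */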
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

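/*
 * Reload a saved SRR pair from the exception frame and write it back to
 * the corresponding SPRs; those registers may have been in use at the
 * time the higher-level exception was taken.
 */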
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

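/*
 * Restore the MMU registers the interrupted code may have been using
 * (the MAS registers on e500, MMUCR on 44x), e.g. when a critical
 * exception hits in the middle of a TLB miss handler.
 */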
#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */