#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

	.align 7

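/*
 * Debug helper: under CONFIG_PPC_RFI_SRR_DEBUG, compare SRR0/SRR1 (or
 * HSRR0/HSRR1 when \srr is hsrr) with the saved _NIP/_MSR and emit a
 * one-shot warning on mismatch. The low two bits are cleared for the NIP
 * comparison, since the low two bits of SRR0 are not significant.
 */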
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
	.ifc \srr,srr
	mfspr	r11,SPRN_SRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_SRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.else
	mfspr	r11,SPRN_HSRR0
	ld	r12,_NIP(r1)
	clrrdi	r11,r11,2
	clrrdi	r12,r12,2
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	mfspr	r11,SPRN_HSRR1
	ld	r12,_MSR(r1)
100:	tdne	r11,r12
	EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
	.endif
#endif
.endm

#ifdef CONFIG_PPC_BOOK3S
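/*
 * Entry and exit for scv (system call vectored). The macro is instantiated
 * below for the normal vector ("common", TRAP=0x3000) and for the SIGILL
 * variant (TRAP=0x7ff0) described at its instantiation.
 */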
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
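	/*
	 * The scv vector entry is assumed to have left the user r13 in r9,
	 * the return address (taken from LR) in r11 and the user MSR (taken
	 * from CTR) in r12, with r13 pointing at the PACA; that is the state
	 * saved into the stack frame below.
	 */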
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_LINK(r1)
	std	r11,_NIP(r1)	/* Saved LR is also the next instruction */
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)	/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off(), which means
	 * interrupts may already have been blocked for some time before
	 * trace_hardirqs_off() records it, but this is the best we can do.
	 */

	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,1 /* scv */
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
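	/*
	 * Restart window: interrupts stay soft-masked from here until the
	 * rst_end label. If one became pending (checked via PACAIRQHAPPENED
	 * below) or arrives inside the window, the RESTART_TABLE entry sends
	 * execution back through the _restart stub so the exit work is redone
	 * with interrupts disabled.
	 */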
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	SANITIZE_RESTORE_NVGPRS()
	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * We could zero these as per the ABI, but we may later adopt a
	 * stricter ABI which preserves them if libc implementations can
	 * benefit, so restore them for now until further measurement is done.
	 */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	HANDLER_RESTORE_NVGPRS()
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

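/*
 * Entry point used when the kernel is entered in real mode: switch to the
 * kernel's virtual-mode MSR (PACAKMSR) and fall through to
 * system_call_common.
 */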
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
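/*
 * e500 only: flush the branch target buffer on entry, a branch predictor
 * hardening (Spectre v2 style) measure.
 */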
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12; this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call. r11 is zero here, so the rldimi
	 * inserts zero into that single bit of the saved CR image in r12.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,STACK_INT_FRAME_MARKER(r1)	/* "regs" marker */
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0

#ifdef CONFIG_PPC_BOOK3S
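	/*
	 * SRR0/SRR1 still hold the NIP/MSR from syscall entry; record that
	 * so the exit path can avoid reloading them. C code clears the flag
	 * if it modifies regs->nip or regs->msr.
	 */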
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1		/* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	/*
	 * Zero user registers to prevent influencing speculative execution
	 * state of kernel code.
	 */
	SANITIZE_SYSCALL_GPRS()
	bl	CFUNC(system_call_exception)

.Lsyscall_exit:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0 /* !scv */
	bl	CFUNC(syscall_exit_prepare)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

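	/*
	 * On Book3S, if PACASRR_VALID is still set then SRR0/SRR1 were not
	 * clobbered and already hold the return NIP/MSR, so the reload below
	 * can be skipped.
	 */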
#ifdef CONFIG_PPC_BOOK3S
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	SANITIZE_RESTORE_NVGPRS()
	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	HANDLER_RESTORE_NVGPRS()
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(syscall_exit_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

/*
 * This path can be used if MSR EE/RI were never enabled, IRQs were not
 * reconciled, the NVGPRs were not touched, and no exit work was created.
 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
	beq	1f
	kuap_user_restore r3, r4
	b	.Lfast_user_interrupt_return_srr
1:	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0	/* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return_srr
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(unrecoverable_exception)
	b	.	/* should not get here */
#else
	bne	.Lfast_user_interrupt_return_srr
	b	.Lfast_kernel_interrupt_return_srr
#endif

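/*
 * Generate an interrupt return path using either the SRR or (on Book3S)
 * HSRR register pair, selected by \srr. The user and kernel return cases
 * are handled separately below.
 */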
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_user_prepare)
#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
#endif
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

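	/* Debug check: must not return to userspace with IRQs soft-masked */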
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

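	/*
	 * Break any reservation left by the user so a stale larx cannot pair
	 * with a later stcx. Where the CPU checks the stcx. address, a dummy
	 * ldarx on the kernel stack is presumably cheaper, and the reservation
	 * it leaves is harmless since no user stcx. can match it.
	 */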
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

#ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont
#endif

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_user_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(interrupt_exit_kernel_prepare)

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
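	/*
	 * SOFTE(r1) holds the soft-mask state of the interrupted context.
	 * Restore it, then pick the exit path for a soft-enabled vs.
	 * soft-disabled return.
	 */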
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 *
	 * The MSR[EE] check catches among other things the short incoherency
	 * in hard_irq_disable() between clearing MSR[EE] and setting
	 * PACA_IRQ_HARD_DIS.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\()	// EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	bne	1f	// HARD_MASK is pending
	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	stb	r11,PACAIRQHAPPENED(r13)
	b	.Lfast_kernel_interrupt_return_\srr\()


1:	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
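	/* MSR[EE] was tested non-zero above, so the xori clears it */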
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
	/*
	 * In the soft-enabled case we need to double-check that no interrupts
	 * became pending before we reached this restart section of code, and
	 * restart the exit so they can be handled.
	 *
	 * If there are none, it is possible that the interrupt still has
	 * PACA_IRQ_HARD_DIS set, which needs to be cleared for the
	 * interrupted context. This clear will not clobber a new pending
	 * interrupt coming in, because we're in the restart section, so any
	 * such interrupt would return to the restart location.
	 */
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
	SANITIZE_RESTORE_NVGPRS()
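	/*
	 * Use cr1 to record whether an emulated stack store is required
	 * (r3 != 0); it is tested only after the GPRs have been restored.
	 */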
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_INT_FRAME_MARKER(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_INT_FRAME_REGS
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	CFUNC(interrupt_exit_kernel_restart)
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
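/*
 * A child whose parent entered the kernel via scv returns here: hand it the
 * child's fork() return value of 0 and leave via the scv exit path.
 */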
_GLOBAL(ret_from_fork_scv)
	bl	CFUNC(schedule_tail)
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	CFUNC(schedule_tail)
	HANDLER_RESTORE_NVGPRS()
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_user_thread)
	bl	CFUNC(schedule_tail)
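	/*
	 * copy_thread() is assumed to have placed the function to call in r14
	 * and its argument in r15 (non-volatile regs, so they survive the
	 * schedule_tail() call). Under ELF ABI v2 the callee may derive its
	 * TOC pointer from r12, so r12 must hold the entry address.
	 */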
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	/*
	 * It does not matter whether this returns via the scv or sc path
	 * because it returns as execve() and therefore has no calling ABI
	 * (i.e., it sets registers according to the exec()ed entry point).
	 */
	b	.Lsyscall_exit

_GLOBAL(start_kernel_thread)
	bl	CFUNC(schedule_tail)
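	/* As above: r14 = function to call, r15 = its argument, per copy_thread() */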
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what this kernel thread
	 * should have done; that gives the rest of the system some hope of
	 * continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0