// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/kernel/signal.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>

#include "signal.h"


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	4
#define TRAMP_SIZE	7
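
/*
 * tramp[0..TRAMP_TRACEBACK-1] hold instructions; the remaining
 * TRAMP_SIZE - TRAMP_TRACEBACK words are zeroed as minimal traceback
 * info (see setup_trampoline() below).
 */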

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */
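
/*
 * Illustrative layout (stack grows down; exact sizes are ABI and
 * config dependent):
 *
 *	old sp ---> +---------------------------+
 *	            |   struct rt_sigframe      |
 *	            +---------------------------+
 *	            | __SIGNAL_FRAMESIZE gap    |
 *	new sp ---> +---------------------------+
 */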

struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

unsigned long get_min_sigframe_size_64(void)
{
	return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
}

/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
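	/* Round up to the next quadword (16-byte) boundary. */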
	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif

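/*
 * Flush the live FP/VMX/VSX register state into the thread_struct so the
 * setup code below copies current values rather than stale ones.
 */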
static void prepare_setup_sigcontext(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (tsk->thread.used_vr)
		flush_altivec_to_thread(tsk);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif /* CONFIG_ALTIVEC */

	flush_fp_to_thread(tsk);

#ifdef CONFIG_VSX
	if (tsk->thread.used_vsr)
		flush_vsx_to_thread(tsk);
#endif /* CONFIG_VSX */
}

/*
 * Set up the sigcontext for the signal frame.
 */

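/*
 * The unsafe_ variants assume the caller has opened a user access window
 * (user_write_access_begin()) and branch to the supplied error label on a
 * faulting access rather than returning an error code.
 */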
#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
do {									\
	if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
		goto label;						\
} while (0)
static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
					struct task_struct *tsk, int signr, sigset_t *set,
					unsigned long handler, int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = regs->msr;
	/* Force userspace to always see softe as 1 (interrupts enabled) */
	unsigned long softe = 0x1;

	BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
	unsafe_put_user(v_regs, &sc->v_regs, efault_out);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
				    33 * sizeof(vector128), efault_out);
		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
	unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
	/* copy fpr regs and fpscr */
	unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
		v_regs += ELF_NVRREG;
		unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the VSX region contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
	unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
	unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
	unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
	unsafe_put_user(signr, &sc->signal, efault_out);
	unsafe_put_user(handler, &sc->handler, efault_out);
	if (set != NULL)
		unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);

	return 0;

efault_out:
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state. If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct task_struct *tsk,
				 int signr, sigset_t *set, unsigned long handler,
				 unsigned long msr)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	long err = 0;

	BUG_ON(tsk != current);

	BUG_ON(!MSR_TM_ACTIVE(msr));

	WARN_ON(tm_suspend_disabled);

	/* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
	 * it contains the correct FP, VEC, VSX state after we treclaimed
	 * the transaction and giveup_all() was called on reclaiming.
	 */
	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.vr_state,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.ckvr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(tsk->thread.ckvrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
	if (msr & MSR_FP)
		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
	else
		err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_ckvsx_to_user(v_regs, tsk);

		if (msr & MSR_VSX)
			err |= copy_vsx_to_user(tm_v_regs, tsk);
		else
			err |= copy_ckvsx_to_user(tm_v_regs, tsk);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that the VSX region contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */
#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do {	\
	if (__unsafe_restore_sigcontext(tsk, set, sig, sc))		\
		goto label;						\
} while (0)
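/*
 * As on the setup side, the caller must have opened the user access window
 * (here with user_read_access_begin()) before using the unsafe_ variant.
 */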
static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
						int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long save_r13 = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
	unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
	/* get MSR separately, transfer the LE bit if doing signal return */
	unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
	if (sig)
		regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
	unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
	unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
	unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
	unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
	unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
	unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
	unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);

	/*
	 * Force reload of FP/VEC/VSX so userspace sees any changes.
	 * Clear these bits from the user process' MSR before copying into the
	 * thread struct. If we are rescheduled or preempted and another task
	 * uses FP/VEC/VSX, and this process has the MSR bits set, then the
	 * context switch code will save the current CPU state into the
	 * thread_struct - possibly overwriting the data we are updating here.
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
	unsafe_get_user(v_regs, &sc->v_regs, efault_out);
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
		unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
				      33 * sizeof(vector128), efault_out);
		tsk->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
	else
		tsk->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0) {
		unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++)
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
	}
#endif
	return 0;

efault_out:
	return -EFAULT;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct task_struct *tsk,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	if (tm_suspend_disabled)
		return -EINVAL;

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching with signals
	 * for TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode. */
	if (MSR_TM_RESV(msr))
		return -EINVAL;

	/* pull in MSR LE from user context */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(tsk->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(tsk->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(tsk->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(tsk->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
					33 * sizeof(vector128));
		current->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(tsk->thread.ckvrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	} else {
		tsk->thread.vrsave = 0;
		tsk->thread.ckvrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
	err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(tsk, tm_v_regs);
		err |= copy_ckvsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++) {
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	tsk->thread.tm_texasr |= TEXASR_FS;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/* pull in MSR TS bits from user context */
	regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));

	/*
	 * Ensure that TM is enabled in regs->msr before we leave the signal
	 * handler. It could be the case that (a) user disabled the TM bit
	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
	 * TM bit was disabled because a sufficient number of context switches
	 * happened whilst in the signal handler and load_tm overflowed,
	 * disabling the TM bit. In either case we can end up with an illegal
	 * TM state leading to a TM Bad Thing when we return to userspace.
	 *
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 */
	regs_set_return_msr(regs, regs->msr | MSR_TM);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&tsk->thread);

	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&tsk->thread.fp_state);
		regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
	}
	if (msr & MSR_VEC) {
		load_vr_state(&tsk->thread.vr_state);
		regs_set_return_msr(regs, regs->msr | MSR_VEC);
	}

	preempt_enable();

	return err;
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
	return -EINVAL;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
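/*
 * The generated sequence is equivalent to:
 *
 *	bctrl				# call the handler
 *	addi	r1, r1, __SIGNAL_FRAMESIZE	# pop the dummy caller frame
 *	li	r0, syscall		# usually __NR_rt_sigreturn
 *	sc				# enter the kernel to sigreturn
 *
 * with the remaining words zeroed as minimal traceback info.
 */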
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* Call the handler and pop the dummy stack frame */
	err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
	err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);

	err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
	err |= __put_user(PPC_RAW_SC(), &tramp[3]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
				   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end. We need to check for this case.
 */
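/*
 * When present, the VSX save area occupies the trailing 32*sizeof(long)
 * bytes of the ucontext: one doubleword for the low half of each of the
 * 32 VSX registers.
 */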
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))

/*
 * Handle {get,set,swap}_context operations
 */
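/*
 * If old_ctx is non-NULL, the current context is saved into it; if new_ctx
 * is non-NULL, execution switches to that context. ctx_size tells us
 * whether the caller's ucontext layout includes a VSX region.
 */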
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		struct ucontext __user *, new_ctx, long, ctx_size)
{
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide enough room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		prepare_setup_sigcontext(current);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;

		unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
					0, ctx_has_vsx_region, efault_out);
		unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
				    sizeof(sigset_t), efault_out);

		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_readable((char __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */

	if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}
	set_current_blocked(&set);

	if (!user_read_access_begin(new_ctx, ctx_size))
		return -EFAULT;
	if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
		user_read_access_end();
		force_exit_sig(SIGSEGV);
		return -EFAULT;
	}
	user_read_access_end();

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);

	return 0;

efault_out:
	user_write_access_end();
	return -EFAULT;
}

/*
 * Do a signal return; undo the signal stack.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
	unsigned long msr;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(uc, sizeof(*uc)))
		goto badframe;

	if (__get_user_sigset(&set, &uc->uc_sigmask))
		goto badframe;
	set_current_blocked(&set);

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
		/*
		 * If there is a transactional state then throw it away.
		 * The purpose of a sigreturn is to destroy all traces of the
		 * signal frame, and this includes any transactional state
		 * created within it. We only check for suspended, as we can
		 * never be active in the kernel; if we somehow are, there is
		 * nothing better to do than go ahead and Bad Thing later.
		 * The cause is not important, as there will never be a
		 * recheckpoint so it's not user visible.
		 */
		if (MSR_TM_SUSPENDED(mfmsr()))
			tm_reclaim_current(0);

		/*
		 * Also disable the MSR[TS] bits so that, if there is an
		 * exception in the code below (such as a page fault in
		 * copy_ckvsx_to_user()), it does not recheckpoint this task
		 * if there was a context switch inside the exception.
		 *
		 * A major page fault can indirectly call schedule(). A
		 * reschedule in the middle of an exception can have a side
		 * effect (changing the CPU MSR[TS] state), since schedule()
		 * is called with the CPU MSR[TS] disabled and returns with
		 * MSR[TS]=Suspended (switch_to() calls tm_recheckpoint() for
		 * the 'new' process). In this case, the process continues to
		 * be the same on the CPU, but the CPU state just changed.
		 *
		 * This can cause a TM Bad Thing, since the MSR in the stack
		 * will have MSR[TS]=0, and this is what will be used to RFID.
		 *
		 * Clearing MSR[TS] state here will avoid a recheckpoint if
		 * there is any process reschedule in kernel space. The
		 * MSR[TS] state does not need to be saved either, since it
		 * will be replaced with the MSR[TS] that came from user
		 * context later, at restore_tm_sigcontexts.
		 */
		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);

		if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
			goto badframe;
	}

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;

		/* Trying to start TM on a non-TM system */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;

		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	} else {
		/*
		 * Fall through, for non-TM restore
		 *
		 * Unset MSR[TS] on the thread regs, since the MSR from the
		 * user context does not have TS active, and recheckpoint
		 * was not called either, since restore_tm_sigcontexts()
		 * was not called.
		 *
		 * If we don't unset it, the code can RFID to userspace with
		 * MSR[TS] set, but without the CPU in the proper state,
		 * causing a TM Bad Thing.
		 */
		regs_set_return_msr(current->thread.regs,
				    current->thread.regs->msr & ~MSR_TS_MASK);
		if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
			goto badframe;

		unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
					  badframe_block);

		user_read_access_end();
	}

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);

	return 0;

badframe_block:
	user_read_access_end();
badframe:
	signal_fault(current, regs, "rt_sigreturn", uc);

	force_sig(SIGSEGV);
	return 0;
}

int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);

	/*
	 * This only applies when calling unsafe_setup_sigcontext() and must be
	 * called before opening the uaccess window.
	 */
	if (!MSR_TM_ACTIVE(msr))
		prepare_setup_sigcontext(tsk);

	if (!user_write_access_begin(frame, sizeof(*frame)))
		goto badframe;

	unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
	unsafe_put_user(&frame->uc, &frame->puc, badframe_block);

	/* Create the ucontext. */
	unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) via its uc_link ptr.
		 */
		unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);

		user_write_access_end();

		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler,
					    msr);

		if (!user_write_access_begin(&frame->uc.uc_sigmask,
					     sizeof(frame->uc.uc_sigmask)))
			goto badframe;

#endif
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
		unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1, badframe_block);
	}

	unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
	user_write_access_end();

	/* Save the siginfo outside of the unsafe block. */
	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (tsk->mm->context.vdso) {
		regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	if (is_elf2_task()) {
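		/*
		 * ELFv2 ABI: the handler is entered via its global entry
		 * point, which expects its own address in r12.
		 */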
		regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->ctr;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine. The first entry in the function
		 * descriptor is the entry address of the signal routine and
		 * the second entry is the TOC value we need to use.
		 */
		struct func_desc __user *ptr =
			(struct func_desc __user *)ksig->ka.sa.sa_handler;

		err |= get_user(regs->ctr, &ptr->addr);
		err |= get_user(regs->gpr[2], &ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		regs->gpr[4] = (unsigned long)&frame->info;
		regs->gpr[5] = (unsigned long)&frame->uc;
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe_block:
	user_write_access_end();
badframe:
	signal_fault(current, regs, "handle_rt_signal64", frame);

	return 1;
}