1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Kernel support for the ptrace() and syscall tracing interfaces. |
4 | * |
5 | * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc. |
6 | * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx> |
7 | * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> |
8 | * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de> |
9 | */ |
10 | |
11 | #include <linux/kernel.h> |
12 | #include <linux/sched.h> |
13 | #include <linux/mm.h> |
14 | #include <linux/smp.h> |
15 | #include <linux/elf.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/ptrace.h> |
18 | #include <linux/user.h> |
19 | #include <linux/personality.h> |
20 | #include <linux/regset.h> |
21 | #include <linux/security.h> |
22 | #include <linux/seccomp.h> |
23 | #include <linux/compat.h> |
24 | #include <linux/signal.h> |
25 | #include <linux/audit.h> |
26 | |
27 | #include <linux/uaccess.h> |
28 | #include <asm/processor.h> |
29 | #include <asm/asm-offsets.h> |
30 | |
31 | /* PSW bits we allow the debugger to modify */ |
32 | #define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB) |
33 | |
34 | #define CREATE_TRACE_POINTS |
35 | #include <trace/events/syscalls.h> |
36 | |
37 | /* |
38 | * These are our native regset flavors. |
39 | */ |
enum parisc_regset {
	REGSET_GENERAL,		/* general registers, exported as NT_PRSTATUS */
	REGSET_FP		/* FP registers, exported as NT_PRFPREG */
};
44 | |
45 | /* |
46 | * Called by kernel/ptrace.c when detaching.. |
47 | * |
48 | * Make sure single step bits etc are not set. |
49 | */ |
50 | void ptrace_disable(struct task_struct *task) |
51 | { |
52 | clear_tsk_thread_flag(tsk: task, TIF_SINGLESTEP); |
53 | clear_tsk_thread_flag(tsk: task, TIF_BLOCKSTEP); |
54 | |
55 | /* make sure the trap bits are not set */ |
56 | pa_psw(task)->r = 0; |
57 | pa_psw(task)->t = 0; |
58 | pa_psw(task)->h = 0; |
59 | pa_psw(task)->l = 0; |
60 | } |
61 | |
62 | /* |
63 | * The following functions are called by ptrace_resume() when |
64 | * enabling or disabling single/block tracing. |
65 | */ |
void user_disable_single_step(struct task_struct *task)
{
	/* Disabling single step is identical to a full ptrace detach:
	 * clear both step flags and all PSW trap bits. */
	ptrace_disable(task);
}
70 | |
71 | void user_enable_single_step(struct task_struct *task) |
72 | { |
73 | clear_tsk_thread_flag(tsk: task, TIF_BLOCKSTEP); |
74 | set_tsk_thread_flag(tsk: task, TIF_SINGLESTEP); |
75 | |
76 | if (pa_psw(task)->n) { |
77 | /* Nullified, just crank over the queue. */ |
78 | task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1]; |
79 | task_regs(task)->iasq[0] = task_regs(task)->iasq[1]; |
80 | task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4; |
81 | pa_psw(task)->n = 0; |
82 | pa_psw(task)->x = 0; |
83 | pa_psw(task)->y = 0; |
84 | pa_psw(task)->z = 0; |
85 | pa_psw(task)->b = 0; |
86 | ptrace_disable(task); |
87 | /* Don't wake up the task, but let the |
88 | parent know something happened. */ |
89 | force_sig_fault_to_task(SIGTRAP, TRAP_TRACE, |
90 | addr: (void __user *) (task_regs(task)->iaoq[0] & ~3), |
91 | t: task); |
92 | /* notify_parent(task, SIGCHLD); */ |
93 | return; |
94 | } |
95 | |
96 | /* Enable recovery counter traps. The recovery counter |
97 | * itself will be set to zero on a task switch. If the |
98 | * task is suspended on a syscall then the syscall return |
99 | * path will overwrite the recovery counter with a suitable |
100 | * value such that it traps once back in user space. We |
101 | * disable interrupts in the tasks PSW here also, to avoid |
102 | * interrupts while the recovery counter is decrementing. |
103 | */ |
104 | pa_psw(task)->r = 1; |
105 | pa_psw(task)->t = 0; |
106 | pa_psw(task)->h = 0; |
107 | pa_psw(task)->l = 0; |
108 | } |
109 | |
110 | void user_enable_block_step(struct task_struct *task) |
111 | { |
112 | clear_tsk_thread_flag(tsk: task, TIF_SINGLESTEP); |
113 | set_tsk_thread_flag(tsk: task, TIF_BLOCKSTEP); |
114 | |
115 | /* Enable taken branch trap. */ |
116 | pa_psw(task)->r = 0; |
117 | pa_psw(task)->t = 1; |
118 | pa_psw(task)->h = 0; |
119 | pa_psw(task)->l = 0; |
120 | } |
121 | |
122 | long arch_ptrace(struct task_struct *child, long request, |
123 | unsigned long addr, unsigned long data) |
124 | { |
125 | unsigned long __user *datap = (unsigned long __user *)data; |
126 | unsigned long tmp; |
127 | long ret = -EIO; |
128 | |
129 | unsigned long user_regs_struct_size = sizeof(struct user_regs_struct); |
130 | #ifdef CONFIG_64BIT |
131 | if (is_compat_task()) |
132 | user_regs_struct_size /= 2; |
133 | #endif |
134 | |
135 | switch (request) { |
136 | |
137 | /* Read the word at location addr in the USER area. For ptraced |
138 | processes, the kernel saves all regs on a syscall. */ |
139 | case PTRACE_PEEKUSR: |
140 | if ((addr & (sizeof(unsigned long)-1)) || |
141 | addr >= sizeof(struct pt_regs)) |
142 | break; |
143 | tmp = *(unsigned long *) ((char *) task_regs(child) + addr); |
144 | ret = put_user(tmp, datap); |
145 | break; |
146 | |
147 | /* Write the word at location addr in the USER area. This will need |
148 | to change when the kernel no longer saves all regs on a syscall. |
149 | FIXME. There is a problem at the moment in that r3-r18 are only |
150 | saved if the process is ptraced on syscall entry, and even then |
151 | those values are overwritten by actual register values on syscall |
152 | exit. */ |
153 | case PTRACE_POKEUSR: |
154 | /* Some register values written here may be ignored in |
155 | * entry.S:syscall_restore_rfi; e.g. iaoq is written with |
156 | * r31/r31+4, and not with the values in pt_regs. |
157 | */ |
158 | if (addr == PT_PSW) { |
159 | /* Allow writing to Nullify, Divide-step-correction, |
160 | * and carry/borrow bits. |
161 | * BEWARE, if you set N, and then single step, it won't |
162 | * stop on the nullified instruction. |
163 | */ |
164 | data &= USER_PSW_BITS; |
165 | task_regs(child)->gr[0] &= ~USER_PSW_BITS; |
166 | task_regs(child)->gr[0] |= data; |
167 | ret = 0; |
168 | break; |
169 | } |
170 | |
171 | if ((addr & (sizeof(unsigned long)-1)) || |
172 | addr >= sizeof(struct pt_regs)) |
173 | break; |
174 | if (addr == PT_IAOQ0 || addr == PT_IAOQ1) { |
175 | data |= PRIV_USER; /* ensure userspace privilege */ |
176 | } |
177 | if ((addr >= PT_GR1 && addr <= PT_GR31) || |
178 | addr == PT_IAOQ0 || addr == PT_IAOQ1 || |
179 | (addr >= PT_FR0 && addr <= PT_FR31 + 4) || |
180 | addr == PT_SAR) { |
181 | *(unsigned long *) ((char *) task_regs(child) + addr) = data; |
182 | ret = 0; |
183 | } |
184 | break; |
185 | |
186 | case PTRACE_GETREGS: /* Get all gp regs from the child. */ |
187 | return copy_regset_to_user(target: child, |
188 | view: task_user_regset_view(current), |
189 | setno: REGSET_GENERAL, |
190 | offset: 0, size: user_regs_struct_size, |
191 | data: datap); |
192 | |
193 | case PTRACE_SETREGS: /* Set all gp regs in the child. */ |
194 | return copy_regset_from_user(target: child, |
195 | view: task_user_regset_view(current), |
196 | setno: REGSET_GENERAL, |
197 | offset: 0, size: user_regs_struct_size, |
198 | data: datap); |
199 | |
200 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ |
201 | return copy_regset_to_user(child, |
202 | task_user_regset_view(current), |
203 | REGSET_FP, |
204 | 0, sizeof(struct user_fp_struct), |
205 | datap); |
206 | |
207 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ |
208 | return copy_regset_from_user(child, |
209 | task_user_regset_view(current), |
210 | REGSET_FP, |
211 | 0, sizeof(struct user_fp_struct), |
212 | datap); |
213 | |
214 | default: |
215 | ret = ptrace_request(child, request, addr, data); |
216 | break; |
217 | } |
218 | |
219 | return ret; |
220 | } |
221 | |
222 | |
223 | #ifdef CONFIG_COMPAT |
224 | |
225 | /* This function is needed to translate 32 bit pt_regs offsets in to |
226 | * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel |
227 | * will request offset 12 if it wants gr3, but the lower 32 bits of |
228 | * the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4). |
229 | * This code relies on a 32 bit pt_regs being comprised of 32 bit values |
230 | * except for the fp registers which (a) are 64 bits, and (b) follow |
231 | * the gr registers at the start of pt_regs. The 32 bit pt_regs should |
232 | * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[] |
233 | * being 64 bit in both cases. |
234 | */ |
235 | |
236 | static compat_ulong_t translate_usr_offset(compat_ulong_t offset) |
237 | { |
238 | compat_ulong_t pos; |
239 | |
240 | if (offset < 32*4) /* gr[0..31] */ |
241 | pos = offset * 2 + 4; |
242 | else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */ |
243 | pos = (offset - 32*4) + PT_FR0; |
244 | else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */ |
245 | pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4; |
246 | else |
247 | pos = sizeof(struct pt_regs); |
248 | |
249 | return pos; |
250 | } |
251 | |
252 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
253 | compat_ulong_t addr, compat_ulong_t data) |
254 | { |
255 | compat_uint_t tmp; |
256 | long ret = -EIO; |
257 | |
258 | switch (request) { |
259 | |
260 | case PTRACE_PEEKUSR: |
261 | if (addr & (sizeof(compat_uint_t)-1)) |
262 | break; |
263 | addr = translate_usr_offset(offset: addr); |
264 | if (addr >= sizeof(struct pt_regs)) |
265 | break; |
266 | |
267 | tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr); |
268 | ret = put_user(tmp, (compat_uint_t *) (unsigned long) data); |
269 | break; |
270 | |
271 | /* Write the word at location addr in the USER area. This will need |
272 | to change when the kernel no longer saves all regs on a syscall. |
273 | FIXME. There is a problem at the moment in that r3-r18 are only |
274 | saved if the process is ptraced on syscall entry, and even then |
275 | those values are overwritten by actual register values on syscall |
276 | exit. */ |
277 | case PTRACE_POKEUSR: |
278 | /* Some register values written here may be ignored in |
279 | * entry.S:syscall_restore_rfi; e.g. iaoq is written with |
280 | * r31/r31+4, and not with the values in pt_regs. |
281 | */ |
282 | if (addr == PT_PSW) { |
283 | /* Since PT_PSW==0, it is valid for 32 bit processes |
284 | * under 64 bit kernels as well. |
285 | */ |
286 | ret = arch_ptrace(child, request, addr, data); |
287 | } else { |
288 | if (addr & (sizeof(compat_uint_t)-1)) |
289 | break; |
290 | addr = translate_usr_offset(offset: addr); |
291 | if (addr >= sizeof(struct pt_regs)) |
292 | break; |
293 | if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) { |
294 | data |= PRIV_USER; /* ensure userspace privilege */ |
295 | } |
296 | if (addr >= PT_FR0 && addr <= PT_FR31 + 4) { |
297 | /* Special case, fp regs are 64 bits anyway */ |
298 | *(__u32 *) ((char *) task_regs(child) + addr) = data; |
299 | ret = 0; |
300 | } |
301 | else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) || |
302 | addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 || |
303 | addr == PT_SAR+4) { |
304 | /* Zero the top 32 bits */ |
305 | *(__u32 *) ((char *) task_regs(child) + addr - 4) = 0; |
306 | *(__u32 *) ((char *) task_regs(child) + addr) = data; |
307 | ret = 0; |
308 | } |
309 | } |
310 | break; |
311 | case PTRACE_GETREGS: |
312 | case PTRACE_SETREGS: |
313 | case PTRACE_GETFPREGS: |
314 | case PTRACE_SETFPREGS: |
315 | return arch_ptrace(child, request, addr, data); |
316 | |
317 | default: |
318 | ret = compat_ptrace_request(child, request, addr, data); |
319 | break; |
320 | } |
321 | |
322 | return ret; |
323 | } |
324 | #endif |
325 | |
326 | long do_syscall_trace_enter(struct pt_regs *regs) |
327 | { |
328 | if (test_thread_flag(TIF_SYSCALL_TRACE)) { |
329 | int rc = ptrace_report_syscall_entry(regs); |
330 | |
331 | /* |
332 | * As tracesys_next does not set %r28 to -ENOSYS |
333 | * when %r20 is set to -1, initialize it here. |
334 | */ |
335 | regs->gr[28] = -ENOSYS; |
336 | |
337 | if (rc) { |
338 | /* |
339 | * A nonzero return code from |
340 | * ptrace_report_syscall_entry() tells us |
341 | * to prevent the syscall execution. Skip |
342 | * the syscall call and the syscall restart handling. |
343 | * |
344 | * Note that the tracer may also just change |
345 | * regs->gr[20] to an invalid syscall number, |
346 | * that is handled by tracesys_next. |
347 | */ |
348 | regs->gr[20] = -1UL; |
349 | return -1; |
350 | } |
351 | } |
352 | |
353 | /* Do the secure computing check after ptrace. */ |
354 | if (secure_computing() == -1) |
355 | return -1; |
356 | |
357 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
358 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
359 | trace_sys_enter(regs, id: regs->gr[20]); |
360 | #endif |
361 | |
362 | #ifdef CONFIG_64BIT |
363 | if (!is_compat_task()) |
364 | audit_syscall_entry(major: regs->gr[20], a0: regs->gr[26], a1: regs->gr[25], |
365 | a2: regs->gr[24], a3: regs->gr[23]); |
366 | else |
367 | #endif |
368 | audit_syscall_entry(major: regs->gr[20] & 0xffffffff, |
369 | a0: regs->gr[26] & 0xffffffff, |
370 | a1: regs->gr[25] & 0xffffffff, |
371 | a2: regs->gr[24] & 0xffffffff, |
372 | a3: regs->gr[23] & 0xffffffff); |
373 | |
374 | /* |
375 | * Sign extend the syscall number to 64bit since it may have been |
376 | * modified by a compat ptrace call |
377 | */ |
378 | return (int) ((u32) regs->gr[20]); |
379 | } |
380 | |
381 | void do_syscall_trace_exit(struct pt_regs *regs) |
382 | { |
383 | int stepping = test_thread_flag(TIF_SINGLESTEP) || |
384 | test_thread_flag(TIF_BLOCKSTEP); |
385 | |
386 | audit_syscall_exit(pt_regs: regs); |
387 | |
388 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
389 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) |
390 | trace_sys_exit(regs, ret: regs->gr[20]); |
391 | #endif |
392 | |
393 | if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) |
394 | ptrace_report_syscall_exit(regs, step: stepping); |
395 | } |
396 | |
397 | |
398 | /* |
399 | * regset functions. |
400 | */ |
401 | |
402 | static int fpr_get(struct task_struct *target, |
403 | const struct user_regset *regset, |
404 | struct membuf to) |
405 | { |
406 | struct pt_regs *regs = task_regs(target); |
407 | |
408 | return membuf_write(&to, regs->fr, ELF_NFPREG * sizeof(__u64)); |
409 | } |
410 | |
411 | static int fpr_set(struct task_struct *target, |
412 | const struct user_regset *regset, |
413 | unsigned int pos, unsigned int count, |
414 | const void *kbuf, const void __user *ubuf) |
415 | { |
416 | struct pt_regs *regs = task_regs(target); |
417 | const __u64 *k = kbuf; |
418 | const __u64 __user *u = ubuf; |
419 | __u64 reg; |
420 | |
421 | pos /= sizeof(reg); |
422 | count /= sizeof(reg); |
423 | |
424 | if (kbuf) |
425 | for (; count > 0 && pos < ELF_NFPREG; --count) |
426 | regs->fr[pos++] = *k++; |
427 | else |
428 | for (; count > 0 && pos < ELF_NFPREG; --count) { |
429 | if (__get_user(reg, u++)) |
430 | return -EFAULT; |
431 | regs->fr[pos++] = reg; |
432 | } |
433 | |
434 | kbuf = k; |
435 | ubuf = u; |
436 | pos *= sizeof(reg); |
437 | count *= sizeof(reg); |
438 | user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, |
439 | ELF_NFPREG * sizeof(reg), -1); |
440 | return 0; |
441 | } |
442 | |
/* Index of member "reg" within user_regs_struct, in long-sized words. */
#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))

/*
 * Read one user_regs_struct slot (index @num, in RI() units) for the
 * regset code.  Saved state comes from pt_regs; control registers not
 * saved there are read live via mfctl().  Unknown indices read as 0.
 */
static unsigned long get_reg(struct pt_regs *regs, int num)
{
	switch (num) {
	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iaoq[0]):		return regs->iaoq[0];
	case RI(iaoq[1]):		return regs->iaoq[1];
	case RI(sar):			return regs->sar;
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	/* control registers below are not in pt_regs; read them live */
	case RI(cr0):			return mfctl(0);
	case RI(cr24):			return mfctl(24);
	case RI(cr25):			return mfctl(25);
	case RI(cr26):			return mfctl(26);
	case RI(cr28):			return mfctl(28);
	case RI(cr29):			return mfctl(29);
	case RI(cr30):			return mfctl(30);
	case RI(cr31):			return mfctl(31);
	case RI(cr8):			return mfctl(8);
	case RI(cr9):			return mfctl(9);
	case RI(cr12):			return mfctl(12);
	case RI(cr13):			return mfctl(13);
	case RI(cr10):			return mfctl(10);
	case RI(cr15):			return mfctl(15);
	default:			return 0;
	}
}
477 | |
/*
 * Write one user_regs_struct slot (index @num, in RI() units).
 * Only PSW (restricted to USER_PSW_BITS), gr1-gr31, the iaoq pair
 * (forced to user privilege) and sar are writable; all other indices
 * are silently ignored.
 */
static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
	switch (num) {
	case RI(gr[0]): /*
			 * PSW is in gr[0].
			 * Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			val &= USER_PSW_BITS;
			regs->gr[0] &= ~USER_PSW_BITS;
			regs->gr[0] |= val;
			return;
	case RI(gr[1]) ... RI(gr[31]):
			regs->gr[num - RI(gr[0])] = val;
			return;
	case RI(iaoq[0]):
	case RI(iaoq[1]):
			/* set 2 lowest bits to ensure userspace privilege: */
			regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
			return;
	case RI(sar):	regs->sar = val;
			return;
	default:	return;
#if 0
	/* do not allow to change any of the following registers (yet) */
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
        case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
        case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
	}
}
518 | |
519 | static int gpr_get(struct task_struct *target, |
520 | const struct user_regset *regset, |
521 | struct membuf to) |
522 | { |
523 | struct pt_regs *regs = task_regs(target); |
524 | unsigned int pos; |
525 | |
526 | for (pos = 0; pos < ELF_NGREG; pos++) |
527 | membuf_store(&to, get_reg(regs, pos)); |
528 | return 0; |
529 | } |
530 | |
531 | static int gpr_set(struct task_struct *target, |
532 | const struct user_regset *regset, |
533 | unsigned int pos, unsigned int count, |
534 | const void *kbuf, const void __user *ubuf) |
535 | { |
536 | struct pt_regs *regs = task_regs(target); |
537 | const unsigned long *k = kbuf; |
538 | const unsigned long __user *u = ubuf; |
539 | unsigned long reg; |
540 | |
541 | pos /= sizeof(reg); |
542 | count /= sizeof(reg); |
543 | |
544 | if (kbuf) |
545 | for (; count > 0 && pos < ELF_NGREG; --count) |
546 | set_reg(regs, num: pos++, val: *k++); |
547 | else |
548 | for (; count > 0 && pos < ELF_NGREG; --count) { |
549 | if (__get_user(reg, u++)) |
550 | return -EFAULT; |
551 | set_reg(regs, num: pos++, val: reg); |
552 | } |
553 | |
554 | kbuf = k; |
555 | ubuf = u; |
556 | pos *= sizeof(reg); |
557 | count *= sizeof(reg); |
558 | user_regset_copyin_ignore(pos: &pos, count: &count, kbuf: &kbuf, ubuf: &ubuf, |
559 | ELF_NGREG * sizeof(reg), end_pos: -1); |
560 | return 0; |
561 | } |
562 | |
/* Native (same-width) regset flavors, indexed by enum parisc_regset. */
static const struct user_regset native_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.regset_get = gpr_get, .set = gpr_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.regset_get = fpr_get, .set = fpr_set
	}
};
575 | |
/* Regset view handed out for native-width tasks. */
static const struct user_regset_view user_parisc_native_view = {
	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
580 | |
581 | #ifdef CONFIG_64BIT |
582 | static int gpr32_get(struct task_struct *target, |
583 | const struct user_regset *regset, |
584 | struct membuf to) |
585 | { |
586 | struct pt_regs *regs = task_regs(target); |
587 | unsigned int pos; |
588 | |
589 | for (pos = 0; pos < ELF_NGREG; pos++) |
590 | membuf_store(&to, (compat_ulong_t)get_reg(regs, pos)); |
591 | |
592 | return 0; |
593 | } |
594 | |
595 | static int gpr32_set(struct task_struct *target, |
596 | const struct user_regset *regset, |
597 | unsigned int pos, unsigned int count, |
598 | const void *kbuf, const void __user *ubuf) |
599 | { |
600 | struct pt_regs *regs = task_regs(target); |
601 | const compat_ulong_t *k = kbuf; |
602 | const compat_ulong_t __user *u = ubuf; |
603 | compat_ulong_t reg; |
604 | |
605 | pos /= sizeof(reg); |
606 | count /= sizeof(reg); |
607 | |
608 | if (kbuf) |
609 | for (; count > 0 && pos < ELF_NGREG; --count) |
610 | set_reg(regs, num: pos++, val: *k++); |
611 | else |
612 | for (; count > 0 && pos < ELF_NGREG; --count) { |
613 | if (__get_user(reg, u++)) |
614 | return -EFAULT; |
615 | set_reg(regs, num: pos++, val: reg); |
616 | } |
617 | |
618 | kbuf = k; |
619 | ubuf = u; |
620 | pos *= sizeof(reg); |
621 | count *= sizeof(reg); |
622 | user_regset_copyin_ignore(pos: &pos, count: &count, kbuf: &kbuf, ubuf: &ubuf, |
623 | ELF_NGREG * sizeof(reg), end_pos: -1); |
624 | return 0; |
625 | } |
626 | |
627 | /* |
628 | * These are the regset flavors matching the 32bit native set. |
629 | */ |
/* Regset flavors matching the 32bit native set (FP regs are 64-bit in
 * both widths, so fpr_get/fpr_set are shared with the native view). */
static const struct user_regset compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.regset_get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.regset_get = fpr_get, .set = fpr_set
	}
};
642 | |
/* Regset view handed out for 32-bit tasks on a 64-bit kernel. */
static const struct user_regset_view user_parisc_compat_view = {
	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
647 | #endif /* CONFIG_64BIT */ |
648 | |
/*
 * Select the regset view matching @task's word size.  The BUILD_BUG_ONs
 * pin the user-visible struct sizes to the ELF_NGREG/ELF_NFPREG counts
 * the regset tables above are declared with.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
	if (is_compat_task())
		return &user_parisc_compat_view;
#endif
	return &user_parisc_native_view;
}
659 | |
660 | |
661 | /* HAVE_REGS_AND_STACK_ACCESS_API feature */ |
662 | |
/* Name -> byte offset mapping for one pt_regs member. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "gr3", "sar" */
	int offset;		/* offsetof() within struct pt_regs */
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
671 | |
/* Lookup table for the query helpers below; NULL-name entry terminates. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_INDEX(gr,0),
	REG_OFFSET_INDEX(gr,1),
	REG_OFFSET_INDEX(gr,2),
	REG_OFFSET_INDEX(gr,3),
	REG_OFFSET_INDEX(gr,4),
	REG_OFFSET_INDEX(gr,5),
	REG_OFFSET_INDEX(gr,6),
	REG_OFFSET_INDEX(gr,7),
	REG_OFFSET_INDEX(gr,8),
	REG_OFFSET_INDEX(gr,9),
	REG_OFFSET_INDEX(gr,10),
	REG_OFFSET_INDEX(gr,11),
	REG_OFFSET_INDEX(gr,12),
	REG_OFFSET_INDEX(gr,13),
	REG_OFFSET_INDEX(gr,14),
	REG_OFFSET_INDEX(gr,15),
	REG_OFFSET_INDEX(gr,16),
	REG_OFFSET_INDEX(gr,17),
	REG_OFFSET_INDEX(gr,18),
	REG_OFFSET_INDEX(gr,19),
	REG_OFFSET_INDEX(gr,20),
	REG_OFFSET_INDEX(gr,21),
	REG_OFFSET_INDEX(gr,22),
	REG_OFFSET_INDEX(gr,23),
	REG_OFFSET_INDEX(gr,24),
	REG_OFFSET_INDEX(gr,25),
	REG_OFFSET_INDEX(gr,26),
	REG_OFFSET_INDEX(gr,27),
	REG_OFFSET_INDEX(gr,28),
	REG_OFFSET_INDEX(gr,29),
	REG_OFFSET_INDEX(gr,30),
	REG_OFFSET_INDEX(gr,31),
	REG_OFFSET_INDEX(sr,0),
	REG_OFFSET_INDEX(sr,1),
	REG_OFFSET_INDEX(sr,2),
	REG_OFFSET_INDEX(sr,3),
	REG_OFFSET_INDEX(sr,4),
	REG_OFFSET_INDEX(sr,5),
	REG_OFFSET_INDEX(sr,6),
	REG_OFFSET_INDEX(sr,7),
	REG_OFFSET_INDEX(iasq,0),
	REG_OFFSET_INDEX(iasq,1),
	REG_OFFSET_INDEX(iaoq,0),
	REG_OFFSET_INDEX(iaoq,1),
	REG_OFFSET_NAME(cr27),
	REG_OFFSET_NAME(ksp),
	REG_OFFSET_NAME(kpc),
	REG_OFFSET_NAME(sar),
	REG_OFFSET_NAME(iir),
	REG_OFFSET_NAME(isr),
	REG_OFFSET_NAME(ior),
	REG_OFFSET_NAME(ipsw),
	REG_OFFSET_END,
};
727 | |
728 | /** |
729 | * regs_query_register_offset() - query register offset from its name |
730 | * @name: the name of a register |
731 | * |
732 | * regs_query_register_offset() returns the offset of a register in struct |
733 | * pt_regs from its name. If the name is invalid, this returns -EINVAL; |
734 | */ |
735 | int regs_query_register_offset(const char *name) |
736 | { |
737 | const struct pt_regs_offset *roff; |
738 | for (roff = regoffset_table; roff->name != NULL; roff++) |
739 | if (!strcmp(roff->name, name)) |
740 | return roff->offset; |
741 | return -EINVAL; |
742 | } |
743 | |
744 | /** |
745 | * regs_query_register_name() - query register name from its offset |
746 | * @offset: the offset of a register in struct pt_regs. |
747 | * |
748 | * regs_query_register_name() returns the name of a register from its |
749 | * offset in struct pt_regs. If the @offset is invalid, this returns NULL; |
750 | */ |
751 | const char *regs_query_register_name(unsigned int offset) |
752 | { |
753 | const struct pt_regs_offset *roff; |
754 | for (roff = regoffset_table; roff->name != NULL; roff++) |
755 | if (roff->offset == offset) |
756 | return roff->name; |
757 | return NULL; |
758 | } |
759 | |
760 | /** |
761 | * regs_within_kernel_stack() - check the address in the stack |
762 | * @regs: pt_regs which contains kernel stack pointer. |
763 | * @addr: address which is checked. |
764 | * |
765 | * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). |
766 | * If @addr is within the kernel stack, it returns true. If not, returns false. |
767 | */ |
768 | int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) |
769 | { |
770 | return ((addr & ~(THREAD_SIZE - 1)) == |
771 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); |
772 | } |
773 | |
774 | /** |
775 | * regs_get_kernel_stack_nth() - get Nth entry of the stack |
776 | * @regs: pt_regs which contains kernel stack pointer. |
777 | * @n: stack entry number. |
778 | * |
779 | * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which |
780 | * is specified by @regs. If the @n th entry is NOT in the kernel stack, |
781 | * this returns 0. |
782 | */ |
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	/* Entry @n lives @n words below the kernel stack pointer. */
	unsigned long *slot = (unsigned long *)kernel_stack_pointer(regs) - n;

	if (regs_within_kernel_stack(regs, (unsigned long)slot))
		return *slot;

	return 0;
}
794 | |