1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Based on arch/arm/kernel/ptrace.c |
4 | * |
5 | * By Ross Biro 1/23/92 |
6 | * edited by Linus Torvalds |
7 | * ARM modifications Copyright (C) 2000 Russell King |
8 | * Copyright (C) 2012 ARM Ltd. |
9 | */ |
10 | |
11 | #include <linux/audit.h> |
12 | #include <linux/compat.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/sched/signal.h> |
15 | #include <linux/sched/task_stack.h> |
16 | #include <linux/mm.h> |
17 | #include <linux/nospec.h> |
18 | #include <linux/smp.h> |
19 | #include <linux/ptrace.h> |
20 | #include <linux/user.h> |
21 | #include <linux/seccomp.h> |
22 | #include <linux/security.h> |
23 | #include <linux/init.h> |
24 | #include <linux/signal.h> |
25 | #include <linux/string.h> |
26 | #include <linux/uaccess.h> |
27 | #include <linux/perf_event.h> |
28 | #include <linux/hw_breakpoint.h> |
29 | #include <linux/regset.h> |
30 | #include <linux/elf.h> |
31 | #include <linux/rseq.h> |
32 | |
33 | #include <asm/compat.h> |
34 | #include <asm/cpufeature.h> |
35 | #include <asm/debug-monitors.h> |
36 | #include <asm/fpsimd.h> |
37 | #include <asm/mte.h> |
38 | #include <asm/pointer_auth.h> |
39 | #include <asm/stacktrace.h> |
40 | #include <asm/syscall.h> |
41 | #include <asm/traps.h> |
42 | #include <asm/system_misc.h> |
43 | |
44 | #define CREATE_TRACE_POINTS |
45 | #include <trace/events/syscalls.h> |
46 | |
47 | struct pt_regs_offset { |
48 | const char *name; |
49 | int offset; |
50 | }; |
51 | |
52 | #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} |
53 | #define REG_OFFSET_END {.name = NULL, .offset = 0} |
54 | #define GPR_OFFSET_NAME(r) \ |
55 | {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} |
56 | |
57 | static const struct pt_regs_offset regoffset_table[] = { |
58 | GPR_OFFSET_NAME(0), |
59 | GPR_OFFSET_NAME(1), |
60 | GPR_OFFSET_NAME(2), |
61 | GPR_OFFSET_NAME(3), |
62 | GPR_OFFSET_NAME(4), |
63 | GPR_OFFSET_NAME(5), |
64 | GPR_OFFSET_NAME(6), |
65 | GPR_OFFSET_NAME(7), |
66 | GPR_OFFSET_NAME(8), |
67 | GPR_OFFSET_NAME(9), |
68 | GPR_OFFSET_NAME(10), |
69 | GPR_OFFSET_NAME(11), |
70 | GPR_OFFSET_NAME(12), |
71 | GPR_OFFSET_NAME(13), |
72 | GPR_OFFSET_NAME(14), |
73 | GPR_OFFSET_NAME(15), |
74 | GPR_OFFSET_NAME(16), |
75 | GPR_OFFSET_NAME(17), |
76 | GPR_OFFSET_NAME(18), |
77 | GPR_OFFSET_NAME(19), |
78 | GPR_OFFSET_NAME(20), |
79 | GPR_OFFSET_NAME(21), |
80 | GPR_OFFSET_NAME(22), |
81 | GPR_OFFSET_NAME(23), |
82 | GPR_OFFSET_NAME(24), |
83 | GPR_OFFSET_NAME(25), |
84 | GPR_OFFSET_NAME(26), |
85 | GPR_OFFSET_NAME(27), |
86 | GPR_OFFSET_NAME(28), |
87 | GPR_OFFSET_NAME(29), |
88 | GPR_OFFSET_NAME(30), |
89 | {.name = "lr" , .offset = offsetof(struct pt_regs, regs[30])}, |
90 | REG_OFFSET_NAME(sp), |
91 | REG_OFFSET_NAME(pc), |
92 | REG_OFFSET_NAME(pstate), |
93 | REG_OFFSET_END, |
94 | }; |
95 | |
96 | /** |
97 | * regs_query_register_offset() - query register offset from its name |
98 | * @name: the name of a register |
99 | * |
100 | * regs_query_register_offset() returns the offset of a register in struct |
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
102 | */ |
103 | int regs_query_register_offset(const char *name) |
104 | { |
105 | const struct pt_regs_offset *roff; |
106 | |
107 | for (roff = regoffset_table; roff->name != NULL; roff++) |
108 | if (!strcmp(roff->name, name)) |
109 | return roff->offset; |
110 | return -EINVAL; |
111 | } |
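
/*
 * Example (illustrative sketch, not used in this file): kprobes-style fetch
 * code can resolve a register name to its pt_regs offset and then read the
 * live value via the generic accessor from <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */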
112 | |
113 | /** |
114 | * regs_within_kernel_stack() - check the address in the stack |
115 | * @regs: pt_regs which contains kernel stack pointer. |
116 | * @addr: address which is checked. |
117 | * |
118 | * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). |
119 | * If @addr is within the kernel stack, it returns true. If not, returns false. |
120 | */ |
121 | static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) |
122 | { |
123 | return ((addr & ~(THREAD_SIZE - 1)) == |
124 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || |
125 | on_irq_stack(addr, sizeof(unsigned long)); |
126 | } |
127 | |
128 | /** |
129 | * regs_get_kernel_stack_nth() - get Nth entry of the stack |
130 | * @regs: pt_regs which contains kernel stack pointer. |
131 | * @n: stack entry number. |
132 | * |
133 | * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which |
134 | * is specified by @regs. If the @n th entry is NOT in the kernel stack, |
135 | * this returns 0. |
136 | */ |
137 | unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) |
138 | { |
139 | unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); |
140 | |
141 | addr += n; |
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
143 | return *addr; |
144 | else |
145 | return 0; |
146 | } |
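
/*
 * Example (illustrative sketch): a kretprobe or fetch-arg handler could read
 * the value two slots above the kernel SP at the probe point with
 *
 *	unsigned long v = regs_get_kernel_stack_nth(regs, 2);
 *
 * Note that a return value of 0 is ambiguous: it may mean the slot holds
 * zero, or that the address fell outside the kernel stack.
 */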
147 | |
148 | /* |
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
151 | */ |
152 | |
153 | /* |
 * Called by kernel/ptrace.c when detaching.
155 | */ |
156 | void ptrace_disable(struct task_struct *child) |
157 | { |
158 | /* |
159 | * This would be better off in core code, but PTRACE_DETACH has |
 * grown its fair share of arch-specific warts and changing it
161 | * is likely to cause regressions on obscure architectures. |
162 | */ |
163 | user_disable_single_step(child); |
164 | } |
165 | |
166 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
167 | /* |
168 | * Handle hitting a HW-breakpoint. |
169 | */ |
170 | static void ptrace_hbptriggered(struct perf_event *bp, |
171 | struct perf_sample_data *data, |
172 | struct pt_regs *regs) |
173 | { |
174 | struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); |
175 | const char *desc = "Hardware breakpoint trap (ptrace)" ; |
176 | |
177 | if (is_compat_task()) { |
178 | int si_errno = 0; |
179 | int i; |
180 | |
181 | for (i = 0; i < ARM_MAX_BRP; ++i) { |
182 | if (current->thread.debug.hbp_break[i] == bp) { |
183 | si_errno = (i << 1) + 1; |
184 | break; |
185 | } |
186 | } |
187 | |
188 | for (i = 0; i < ARM_MAX_WRP; ++i) { |
189 | if (current->thread.debug.hbp_watch[i] == bp) { |
190 | si_errno = -((i << 1) + 1); |
191 | break; |
192 | } |
193 | } |
194 | arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, |
195 | desc); |
196 | return; |
197 | } |
198 | |
199 | arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc); |
200 | } |
201 | |
202 | /* |
203 | * Unregister breakpoints from this task and reset the pointers in |
204 | * the thread_struct. |
205 | */ |
206 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) |
207 | { |
208 | int i; |
209 | struct thread_struct *t = &tsk->thread; |
210 | |
211 | for (i = 0; i < ARM_MAX_BRP; i++) { |
212 | if (t->debug.hbp_break[i]) { |
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
214 | t->debug.hbp_break[i] = NULL; |
215 | } |
216 | } |
217 | |
218 | for (i = 0; i < ARM_MAX_WRP; i++) { |
219 | if (t->debug.hbp_watch[i]) { |
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
221 | t->debug.hbp_watch[i] = NULL; |
222 | } |
223 | } |
224 | } |
225 | |
226 | void ptrace_hw_copy_thread(struct task_struct *tsk) |
227 | { |
228 | memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); |
229 | } |
230 | |
231 | static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, |
232 | struct task_struct *tsk, |
233 | unsigned long idx) |
234 | { |
	struct perf_event *bp = ERR_PTR(-EINVAL);
236 | |
237 | switch (note_type) { |
238 | case NT_ARM_HW_BREAK: |
239 | if (idx >= ARM_MAX_BRP) |
240 | goto out; |
241 | idx = array_index_nospec(idx, ARM_MAX_BRP); |
242 | bp = tsk->thread.debug.hbp_break[idx]; |
243 | break; |
244 | case NT_ARM_HW_WATCH: |
245 | if (idx >= ARM_MAX_WRP) |
246 | goto out; |
247 | idx = array_index_nospec(idx, ARM_MAX_WRP); |
248 | bp = tsk->thread.debug.hbp_watch[idx]; |
249 | break; |
250 | } |
251 | |
252 | out: |
253 | return bp; |
254 | } |
255 | |
256 | static int ptrace_hbp_set_event(unsigned int note_type, |
257 | struct task_struct *tsk, |
258 | unsigned long idx, |
259 | struct perf_event *bp) |
260 | { |
261 | int err = -EINVAL; |
262 | |
263 | switch (note_type) { |
264 | case NT_ARM_HW_BREAK: |
265 | if (idx >= ARM_MAX_BRP) |
266 | goto out; |
267 | idx = array_index_nospec(idx, ARM_MAX_BRP); |
268 | tsk->thread.debug.hbp_break[idx] = bp; |
269 | err = 0; |
270 | break; |
271 | case NT_ARM_HW_WATCH: |
272 | if (idx >= ARM_MAX_WRP) |
273 | goto out; |
274 | idx = array_index_nospec(idx, ARM_MAX_WRP); |
275 | tsk->thread.debug.hbp_watch[idx] = bp; |
276 | err = 0; |
277 | break; |
278 | } |
279 | |
280 | out: |
281 | return err; |
282 | } |
283 | |
284 | static struct perf_event *ptrace_hbp_create(unsigned int note_type, |
285 | struct task_struct *tsk, |
286 | unsigned long idx) |
287 | { |
288 | struct perf_event *bp; |
289 | struct perf_event_attr attr; |
290 | int err, type; |
291 | |
292 | switch (note_type) { |
293 | case NT_ARM_HW_BREAK: |
294 | type = HW_BREAKPOINT_X; |
295 | break; |
296 | case NT_ARM_HW_WATCH: |
297 | type = HW_BREAKPOINT_RW; |
298 | break; |
299 | default: |
		return ERR_PTR(-EINVAL);
301 | } |
302 | |
	ptrace_breakpoint_init(&attr);
304 | |
305 | /* |
306 | * Initialise fields to sane defaults |
307 | * (i.e. values that will pass validation). |
308 | */ |
309 | attr.bp_addr = 0; |
310 | attr.bp_len = HW_BREAKPOINT_LEN_4; |
311 | attr.bp_type = type; |
312 | attr.disabled = 1; |
313 | |
	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
316 | return bp; |
317 | |
318 | err = ptrace_hbp_set_event(note_type, tsk, idx, bp); |
319 | if (err) |
		return ERR_PTR(err);
321 | |
322 | return bp; |
323 | } |
324 | |
325 | static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, |
326 | struct arch_hw_breakpoint_ctrl ctrl, |
327 | struct perf_event_attr *attr) |
328 | { |
329 | int err, len, type, offset, disabled = !ctrl.enabled; |
330 | |
331 | attr->disabled = disabled; |
332 | if (disabled) |
333 | return 0; |
334 | |
	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
336 | if (err) |
337 | return err; |
338 | |
339 | switch (note_type) { |
340 | case NT_ARM_HW_BREAK: |
341 | if ((type & HW_BREAKPOINT_X) != type) |
342 | return -EINVAL; |
343 | break; |
344 | case NT_ARM_HW_WATCH: |
345 | if ((type & HW_BREAKPOINT_RW) != type) |
346 | return -EINVAL; |
347 | break; |
348 | default: |
349 | return -EINVAL; |
350 | } |
351 | |
352 | attr->bp_len = len; |
353 | attr->bp_type = type; |
354 | attr->bp_addr += offset; |
355 | |
356 | return 0; |
357 | } |
358 | |
359 | static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) |
360 | { |
361 | u8 num; |
362 | u32 reg = 0; |
363 | |
364 | switch (note_type) { |
365 | case NT_ARM_HW_BREAK: |
366 | num = hw_breakpoint_slots(TYPE_INST); |
367 | break; |
368 | case NT_ARM_HW_WATCH: |
369 | num = hw_breakpoint_slots(TYPE_DATA); |
370 | break; |
371 | default: |
372 | return -EINVAL; |
373 | } |
374 | |
375 | reg |= debug_monitors_arch(); |
376 | reg <<= 8; |
377 | reg |= num; |
378 | |
379 | *info = reg; |
380 | return 0; |
381 | } |
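
/*
 * The resource word built above packs the debug architecture version in
 * bits [15:8] and the number of available slots in bits [7:0]. A debugger
 * reading struct user_hwdebug_state::dbg_info could decode it roughly as:
 *
 *	unsigned int num_slots  = dbg_info & 0xff;
 *	unsigned int debug_arch = (dbg_info >> 8) & 0xff;
 */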
382 | |
383 | static int ptrace_hbp_get_ctrl(unsigned int note_type, |
384 | struct task_struct *tsk, |
385 | unsigned long idx, |
386 | u32 *ctrl) |
387 | { |
388 | struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); |
389 | |
	if (IS_ERR(bp))
		return PTR_ERR(bp);
392 | |
393 | *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; |
394 | return 0; |
395 | } |
396 | |
397 | static int ptrace_hbp_get_addr(unsigned int note_type, |
398 | struct task_struct *tsk, |
399 | unsigned long idx, |
400 | u64 *addr) |
401 | { |
402 | struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); |
403 | |
	if (IS_ERR(bp))
		return PTR_ERR(bp);
406 | |
407 | *addr = bp ? counter_arch_bp(bp)->address : 0; |
408 | return 0; |
409 | } |
410 | |
411 | static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, |
412 | struct task_struct *tsk, |
413 | unsigned long idx) |
414 | { |
415 | struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); |
416 | |
417 | if (!bp) |
418 | bp = ptrace_hbp_create(note_type, tsk, idx); |
419 | |
420 | return bp; |
421 | } |
422 | |
423 | static int ptrace_hbp_set_ctrl(unsigned int note_type, |
424 | struct task_struct *tsk, |
425 | unsigned long idx, |
426 | u32 uctrl) |
427 | { |
428 | int err; |
429 | struct perf_event *bp; |
430 | struct perf_event_attr attr; |
431 | struct arch_hw_breakpoint_ctrl ctrl; |
432 | |
433 | bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); |
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
436 | return err; |
437 | } |
438 | |
439 | attr = bp->attr; |
440 | decode_ctrl_reg(uctrl, &ctrl); |
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
442 | if (err) |
443 | return err; |
444 | |
	return modify_user_hw_breakpoint(bp, &attr);
446 | } |
447 | |
448 | static int ptrace_hbp_set_addr(unsigned int note_type, |
449 | struct task_struct *tsk, |
450 | unsigned long idx, |
451 | u64 addr) |
452 | { |
453 | int err; |
454 | struct perf_event *bp; |
455 | struct perf_event_attr attr; |
456 | |
457 | bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); |
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
460 | return err; |
461 | } |
462 | |
463 | attr = bp->attr; |
464 | attr.bp_addr = addr; |
	err = modify_user_hw_breakpoint(bp, &attr);
466 | return err; |
467 | } |
468 | |
469 | #define PTRACE_HBP_ADDR_SZ sizeof(u64) |
470 | #define PTRACE_HBP_CTRL_SZ sizeof(u32) |
471 | #define PTRACE_HBP_PAD_SZ sizeof(u32) |
472 | |
473 | static int hw_break_get(struct task_struct *target, |
474 | const struct user_regset *regset, |
475 | struct membuf to) |
476 | { |
477 | unsigned int note_type = regset->core_note_type; |
478 | int ret, idx = 0; |
479 | u32 info, ctrl; |
480 | u64 addr; |
481 | |
482 | /* Resource info */ |
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
500 | idx++; |
501 | } |
502 | return 0; |
503 | } |
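
/*
 * Userspace view (sketch, assuming a stopped tracee): a debugger fetches
 * this regset with PTRACE_GETREGSET and NT_ARM_HW_BREAK or NT_ARM_HW_WATCH
 * and receives the dbg_info word followed by (addr, ctrl) pairs:
 *
 *	struct user_hwdebug_state hws;
 *	struct iovec iov = { .iov_base = &hws, .iov_len = sizeof(hws) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 */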
504 | |
505 | static int hw_break_set(struct task_struct *target, |
506 | const struct user_regset *regset, |
507 | unsigned int pos, unsigned int count, |
508 | const void *kbuf, const void __user *ubuf) |
509 | { |
510 | unsigned int note_type = regset->core_note_type; |
511 | int ret, idx = 0, offset, limit; |
512 | u32 ctrl; |
513 | u64 addr; |
514 | |
515 | /* Resource info and pad */ |
516 | offset = offsetof(struct user_hwdebug_state, dbg_regs); |
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
518 | |
519 | /* (address, ctrl) registers */ |
520 | limit = regset->n * regset->size; |
521 | while (count && offset < limit) { |
522 | if (count < PTRACE_HBP_ADDR_SZ) |
523 | return -EINVAL; |
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
546 | offset += PTRACE_HBP_PAD_SZ; |
547 | idx++; |
548 | } |
549 | |
550 | return 0; |
551 | } |
552 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
553 | |
554 | static int gpr_get(struct task_struct *target, |
555 | const struct user_regset *regset, |
556 | struct membuf to) |
557 | { |
558 | struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; |
559 | return membuf_write(&to, uregs, sizeof(*uregs)); |
560 | } |
561 | |
562 | static int gpr_set(struct task_struct *target, const struct user_regset *regset, |
563 | unsigned int pos, unsigned int count, |
564 | const void *kbuf, const void __user *ubuf) |
565 | { |
566 | int ret; |
567 | struct user_pt_regs newregs = task_pt_regs(target)->user_regs; |
568 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
570 | if (ret) |
571 | return ret; |
572 | |
573 | if (!valid_user_regs(&newregs, target)) |
574 | return -EINVAL; |
575 | |
576 | task_pt_regs(target)->user_regs = newregs; |
577 | return 0; |
578 | } |
579 | |
580 | static int fpr_active(struct task_struct *target, const struct user_regset *regset) |
581 | { |
582 | if (!system_supports_fpsimd()) |
583 | return -ENODEV; |
584 | return regset->n; |
585 | } |
586 | |
587 | /* |
588 | * TODO: update fp accessors for lazy context switching (sync/flush hwstate) |
589 | */ |
590 | static int __fpr_get(struct task_struct *target, |
591 | const struct user_regset *regset, |
592 | struct membuf to) |
593 | { |
594 | struct user_fpsimd_state *uregs; |
595 | |
596 | sve_sync_to_fpsimd(target); |
597 | |
598 | uregs = &target->thread.uw.fpsimd_state; |
599 | |
600 | return membuf_write(&to, uregs, sizeof(*uregs)); |
601 | } |
602 | |
603 | static int fpr_get(struct task_struct *target, const struct user_regset *regset, |
604 | struct membuf to) |
605 | { |
606 | if (!system_supports_fpsimd()) |
607 | return -EINVAL; |
608 | |
609 | if (target == current) |
610 | fpsimd_preserve_current_state(); |
611 | |
612 | return __fpr_get(target, regset, to); |
613 | } |
614 | |
615 | static int __fpr_set(struct task_struct *target, |
616 | const struct user_regset *regset, |
617 | unsigned int pos, unsigned int count, |
618 | const void *kbuf, const void __user *ubuf, |
619 | unsigned int start_pos) |
620 | { |
621 | int ret; |
622 | struct user_fpsimd_state newstate; |
623 | |
624 | /* |
625 | * Ensure target->thread.uw.fpsimd_state is up to date, so that a |
626 | * short copyin can't resurrect stale data. |
627 | */ |
628 | sve_sync_to_fpsimd(target); |
629 | |
630 | newstate = target->thread.uw.fpsimd_state; |
631 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
634 | if (ret) |
635 | return ret; |
636 | |
637 | target->thread.uw.fpsimd_state = newstate; |
638 | |
639 | return ret; |
640 | } |
641 | |
642 | static int fpr_set(struct task_struct *target, const struct user_regset *regset, |
643 | unsigned int pos, unsigned int count, |
644 | const void *kbuf, const void __user *ubuf) |
645 | { |
646 | int ret; |
647 | |
648 | if (!system_supports_fpsimd()) |
649 | return -EINVAL; |
650 | |
	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
652 | if (ret) |
653 | return ret; |
654 | |
655 | sve_sync_from_fpsimd_zeropad(target); |
656 | fpsimd_flush_task_state(target); |
657 | |
658 | return ret; |
659 | } |
660 | |
661 | static int tls_get(struct task_struct *target, const struct user_regset *regset, |
662 | struct membuf to) |
663 | { |
664 | int ret; |
665 | |
666 | if (target == current) |
667 | tls_preserve_current_state(); |
668 | |
669 | ret = membuf_store(&to, target->thread.uw.tp_value); |
670 | if (system_supports_tpidr2()) |
671 | ret = membuf_store(&to, target->thread.tpidr2_el0); |
672 | else |
		ret = membuf_zero(&to, sizeof(u64));
674 | |
675 | return ret; |
676 | } |
677 | |
678 | static int tls_set(struct task_struct *target, const struct user_regset *regset, |
679 | unsigned int pos, unsigned int count, |
680 | const void *kbuf, const void __user *ubuf) |
681 | { |
682 | int ret; |
683 | unsigned long tls[2]; |
684 | |
685 | tls[0] = target->thread.uw.tp_value; |
686 | if (system_supports_tpidr2()) |
687 | tls[1] = target->thread.tpidr2_el0; |
688 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
690 | if (ret) |
691 | return ret; |
692 | |
693 | target->thread.uw.tp_value = tls[0]; |
694 | if (system_supports_tpidr2()) |
695 | target->thread.tpidr2_el0 = tls[1]; |
696 | |
697 | return ret; |
698 | } |
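
/*
 * Layout seen by userspace for NT_ARM_TLS (sketch): a two-element u64
 * array, [0] = TPIDR_EL0 and [1] = TPIDR2_EL0. When TPIDR2 is not
 * supported the second element reads back as zero and writes to it are
 * ignored.
 */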
699 | |
700 | static int fpmr_get(struct task_struct *target, const struct user_regset *regset, |
701 | struct membuf to) |
702 | { |
703 | if (!system_supports_fpmr()) |
704 | return -EINVAL; |
705 | |
706 | if (target == current) |
707 | fpsimd_preserve_current_state(); |
708 | |
709 | return membuf_store(&to, target->thread.uw.fpmr); |
710 | } |
711 | |
712 | static int fpmr_set(struct task_struct *target, const struct user_regset *regset, |
713 | unsigned int pos, unsigned int count, |
714 | const void *kbuf, const void __user *ubuf) |
715 | { |
716 | int ret; |
717 | unsigned long fpmr; |
718 | |
719 | if (!system_supports_fpmr()) |
720 | return -EINVAL; |
721 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
723 | if (ret) |
724 | return ret; |
725 | |
726 | target->thread.uw.fpmr = fpmr; |
727 | |
728 | fpsimd_flush_task_state(target); |
729 | |
730 | return 0; |
731 | } |
732 | |
733 | static int system_call_get(struct task_struct *target, |
734 | const struct user_regset *regset, |
735 | struct membuf to) |
736 | { |
737 | return membuf_store(&to, task_pt_regs(target)->syscallno); |
738 | } |
739 | |
740 | static int system_call_set(struct task_struct *target, |
741 | const struct user_regset *regset, |
742 | unsigned int pos, unsigned int count, |
743 | const void *kbuf, const void __user *ubuf) |
744 | { |
745 | int syscallno = task_pt_regs(target)->syscallno; |
746 | int ret; |
747 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
749 | if (ret) |
750 | return ret; |
751 | |
752 | task_pt_regs(target)->syscallno = syscallno; |
753 | return ret; |
754 | } |
755 | |
756 | #ifdef CONFIG_ARM64_SVE |
757 | |
758 | static void sve_init_header_from_task(struct user_sve_header *header, |
759 | struct task_struct *target, |
760 | enum vec_type type) |
761 | { |
762 | unsigned int vq; |
763 | bool active; |
764 | enum vec_type task_type; |
765 | |
766 | memset(header, 0, sizeof(*header)); |
767 | |
768 | /* Check if the requested registers are active for the task */ |
769 | if (thread_sm_enabled(&target->thread)) |
770 | task_type = ARM64_VEC_SME; |
771 | else |
772 | task_type = ARM64_VEC_SVE; |
773 | active = (task_type == type); |
774 | |
775 | switch (type) { |
776 | case ARM64_VEC_SVE: |
777 | if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) |
778 | header->flags |= SVE_PT_VL_INHERIT; |
779 | break; |
780 | case ARM64_VEC_SME: |
781 | if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) |
782 | header->flags |= SVE_PT_VL_INHERIT; |
783 | break; |
784 | default: |
785 | WARN_ON_ONCE(1); |
786 | return; |
787 | } |
788 | |
789 | if (active) { |
790 | if (target->thread.fp_type == FP_STATE_FPSIMD) { |
791 | header->flags |= SVE_PT_REGS_FPSIMD; |
792 | } else { |
793 | header->flags |= SVE_PT_REGS_SVE; |
794 | } |
795 | } |
796 | |
797 | header->vl = task_get_vl(target, type); |
798 | vq = sve_vq_from_vl(header->vl); |
799 | |
800 | header->max_vl = vec_max_vl(type); |
801 | header->size = SVE_PT_SIZE(vq, header->flags); |
802 | header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), |
803 | SVE_PT_REGS_SVE); |
804 | } |
805 | |
806 | static unsigned int sve_size_from_header(struct user_sve_header const *header) |
807 | { |
808 | return ALIGN(header->size, SVE_VQ_BYTES); |
809 | } |
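
/*
 * Worked example (illustrative): for a task using a 256-bit vector length,
 * header.vl = 32 and vq = sve_vq_from_vl(32) = 2. With SVE_PT_REGS_SVE set,
 * header.size covers the header plus the Z, P, FFR, FPSR and FPCR payload
 * for vq = 2, and sve_size_from_header() rounds that up to a multiple of
 * SVE_VQ_BYTES.
 */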
810 | |
811 | static int sve_get_common(struct task_struct *target, |
812 | const struct user_regset *regset, |
813 | struct membuf to, |
814 | enum vec_type type) |
815 | { |
816 | struct user_sve_header header; |
817 | unsigned int vq; |
818 | unsigned long start, end; |
819 | |
820 | /* Header */ |
821 | sve_init_header_from_task(&header, target, type); |
822 | vq = sve_vq_from_vl(header.vl); |
823 | |
824 | membuf_write(&to, &header, sizeof(header)); |
825 | |
826 | if (target == current) |
827 | fpsimd_preserve_current_state(); |
828 | |
829 | BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); |
830 | BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); |
831 | |
832 | switch ((header.flags & SVE_PT_REGS_MASK)) { |
833 | case SVE_PT_REGS_FPSIMD: |
834 | return __fpr_get(target, regset, to); |
835 | |
836 | case SVE_PT_REGS_SVE: |
837 | start = SVE_PT_SVE_OFFSET; |
838 | end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); |
839 | membuf_write(&to, target->thread.sve_state, end - start); |
840 | |
841 | start = end; |
842 | end = SVE_PT_SVE_FPSR_OFFSET(vq); |
843 | membuf_zero(&to, end - start); |
844 | |
845 | /* |
846 | * Copy fpsr, and fpcr which must follow contiguously in |
847 | * struct fpsimd_state: |
848 | */ |
849 | start = end; |
850 | end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; |
851 | membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, |
852 | end - start); |
853 | |
854 | start = end; |
855 | end = sve_size_from_header(&header); |
856 | return membuf_zero(&to, end - start); |
857 | |
858 | default: |
859 | return 0; |
860 | } |
861 | } |
862 | |
863 | static int sve_get(struct task_struct *target, |
864 | const struct user_regset *regset, |
865 | struct membuf to) |
866 | { |
867 | if (!system_supports_sve()) |
868 | return -EINVAL; |
869 | |
870 | return sve_get_common(target, regset, to, ARM64_VEC_SVE); |
871 | } |
872 | |
873 | static int sve_set_common(struct task_struct *target, |
874 | const struct user_regset *regset, |
875 | unsigned int pos, unsigned int count, |
876 | const void *kbuf, const void __user *ubuf, |
877 | enum vec_type type) |
878 | { |
879 | int ret; |
880 | struct user_sve_header header; |
881 | unsigned int vq; |
882 | unsigned long start, end; |
883 | |
884 | /* Header */ |
885 | if (count < sizeof(header)) |
886 | return -EINVAL; |
887 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, |
888 | 0, sizeof(header)); |
889 | if (ret) |
890 | goto out; |
891 | |
892 | /* |
893 | * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by |
894 | * vec_set_vector_length(), which will also validate them for us: |
895 | */ |
896 | ret = vec_set_vector_length(target, type, header.vl, |
897 | ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); |
898 | if (ret) |
899 | goto out; |
900 | |
901 | /* Actual VL set may be less than the user asked for: */ |
902 | vq = sve_vq_from_vl(task_get_vl(target, type)); |
903 | |
904 | /* Enter/exit streaming mode */ |
905 | if (system_supports_sme()) { |
906 | u64 old_svcr = target->thread.svcr; |
907 | |
908 | switch (type) { |
909 | case ARM64_VEC_SVE: |
910 | target->thread.svcr &= ~SVCR_SM_MASK; |
911 | break; |
912 | case ARM64_VEC_SME: |
913 | target->thread.svcr |= SVCR_SM_MASK; |
914 | |
915 | /* |
916 | * Disable traps and ensure there is SME storage but |
917 | * preserve any currently set values in ZA/ZT. |
918 | */ |
919 | sme_alloc(target, false); |
920 | set_tsk_thread_flag(target, TIF_SME); |
921 | break; |
922 | default: |
923 | WARN_ON_ONCE(1); |
924 | ret = -EINVAL; |
925 | goto out; |
926 | } |
927 | |
928 | /* |
929 | * If we switched then invalidate any existing SVE |
930 | * state and ensure there's storage. |
931 | */ |
932 | if (target->thread.svcr != old_svcr) |
933 | sve_alloc(target, true); |
934 | } |
935 | |
936 | /* Registers: FPSIMD-only case */ |
937 | |
938 | BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); |
939 | if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { |
940 | ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, |
941 | SVE_PT_FPSIMD_OFFSET); |
942 | clear_tsk_thread_flag(target, TIF_SVE); |
943 | target->thread.fp_type = FP_STATE_FPSIMD; |
944 | goto out; |
945 | } |
946 | |
947 | /* |
948 | * Otherwise: no registers or full SVE case. For backwards |
949 | * compatibility reasons we treat empty flags as SVE registers. |
950 | */ |
951 | |
952 | /* |
953 | * If setting a different VL from the requested VL and there is |
954 | * register data, the data layout will be wrong: don't even |
955 | * try to set the registers in this case. |
956 | */ |
957 | if (count && vq != sve_vq_from_vl(header.vl)) { |
958 | ret = -EIO; |
959 | goto out; |
960 | } |
961 | |
962 | sve_alloc(target, true); |
963 | if (!target->thread.sve_state) { |
964 | ret = -ENOMEM; |
965 | clear_tsk_thread_flag(target, TIF_SVE); |
966 | target->thread.fp_type = FP_STATE_FPSIMD; |
967 | goto out; |
968 | } |
969 | |
970 | /* |
971 | * Ensure target->thread.sve_state is up to date with target's |
972 | * FPSIMD regs, so that a short copyin leaves trailing |
	 * registers unmodified. Only enable SVE if we are configuring
	 * normal SVE; a system with streaming SVE may not have normal
	 * SVE.
976 | */ |
977 | fpsimd_sync_to_sve(target); |
978 | if (type == ARM64_VEC_SVE) |
979 | set_tsk_thread_flag(target, TIF_SVE); |
980 | target->thread.fp_type = FP_STATE_SVE; |
981 | |
982 | BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); |
983 | start = SVE_PT_SVE_OFFSET; |
984 | end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); |
985 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
986 | target->thread.sve_state, |
987 | start, end); |
988 | if (ret) |
989 | goto out; |
990 | |
991 | start = end; |
992 | end = SVE_PT_SVE_FPSR_OFFSET(vq); |
993 | user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end); |
994 | |
995 | /* |
996 | * Copy fpsr, and fpcr which must follow contiguously in |
997 | * struct fpsimd_state: |
998 | */ |
999 | start = end; |
1000 | end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; |
1001 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
1002 | &target->thread.uw.fpsimd_state.fpsr, |
1003 | start, end); |
1004 | |
1005 | out: |
1006 | fpsimd_flush_task_state(target); |
1007 | return ret; |
1008 | } |
1009 | |
1010 | static int sve_set(struct task_struct *target, |
1011 | const struct user_regset *regset, |
1012 | unsigned int pos, unsigned int count, |
1013 | const void *kbuf, const void __user *ubuf) |
1014 | { |
1015 | if (!system_supports_sve()) |
1016 | return -EINVAL; |
1017 | |
1018 | return sve_set_common(target, regset, pos, count, kbuf, ubuf, |
1019 | ARM64_VEC_SVE); |
1020 | } |
1021 | |
1022 | #endif /* CONFIG_ARM64_SVE */ |
1023 | |
1024 | #ifdef CONFIG_ARM64_SME |
1025 | |
1026 | static int ssve_get(struct task_struct *target, |
1027 | const struct user_regset *regset, |
1028 | struct membuf to) |
1029 | { |
1030 | if (!system_supports_sme()) |
1031 | return -EINVAL; |
1032 | |
1033 | return sve_get_common(target, regset, to, ARM64_VEC_SME); |
1034 | } |
1035 | |
1036 | static int ssve_set(struct task_struct *target, |
1037 | const struct user_regset *regset, |
1038 | unsigned int pos, unsigned int count, |
1039 | const void *kbuf, const void __user *ubuf) |
1040 | { |
1041 | if (!system_supports_sme()) |
1042 | return -EINVAL; |
1043 | |
1044 | return sve_set_common(target, regset, pos, count, kbuf, ubuf, |
1045 | ARM64_VEC_SME); |
1046 | } |
1047 | |
1048 | static int za_get(struct task_struct *target, |
1049 | const struct user_regset *regset, |
1050 | struct membuf to) |
1051 | { |
1052 | struct user_za_header header; |
1053 | unsigned int vq; |
1054 | unsigned long start, end; |
1055 | |
1056 | if (!system_supports_sme()) |
1057 | return -EINVAL; |
1058 | |
1059 | /* Header */ |
1060 | memset(&header, 0, sizeof(header)); |
1061 | |
1062 | if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT)) |
1063 | header.flags |= ZA_PT_VL_INHERIT; |
1064 | |
1065 | header.vl = task_get_sme_vl(target); |
1066 | vq = sve_vq_from_vl(header.vl); |
1067 | header.max_vl = sme_max_vl(); |
1068 | header.max_size = ZA_PT_SIZE(vq); |
1069 | |
1070 | /* If ZA is not active there is only the header */ |
1071 | if (thread_za_enabled(&target->thread)) |
1072 | header.size = ZA_PT_SIZE(vq); |
1073 | else |
1074 | header.size = ZA_PT_ZA_OFFSET; |
1075 | |
1076 | membuf_write(&to, &header, sizeof(header)); |
1077 | |
1078 | BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); |
1079 | end = ZA_PT_ZA_OFFSET; |
1080 | |
1081 | if (target == current) |
1082 | fpsimd_preserve_current_state(); |
1083 | |
1084 | /* Any register data to include? */ |
1085 | if (thread_za_enabled(&target->thread)) { |
1086 | start = end; |
1087 | end = ZA_PT_SIZE(vq); |
1088 | membuf_write(&to, target->thread.sme_state, end - start); |
1089 | } |
1090 | |
1091 | /* Zero any trailing padding */ |
1092 | start = end; |
1093 | end = ALIGN(header.size, SVE_VQ_BYTES); |
1094 | return membuf_zero(&to, end - start); |
1095 | } |
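
/*
 * What a debugger sees for NT_ARM_ZA (sketch): the user_za_header always
 * comes first. If PSTATE.ZA is set it is followed by the ZA matrix as
 * VL x VL bytes (ZA_PT_SIZE(vq) - ZA_PT_ZA_OFFSET); otherwise header.size
 * equals ZA_PT_ZA_OFFSET and there is no register payload.
 */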
1096 | |
1097 | static int za_set(struct task_struct *target, |
1098 | const struct user_regset *regset, |
1099 | unsigned int pos, unsigned int count, |
1100 | const void *kbuf, const void __user *ubuf) |
1101 | { |
1102 | int ret; |
1103 | struct user_za_header header; |
1104 | unsigned int vq; |
1105 | unsigned long start, end; |
1106 | |
1107 | if (!system_supports_sme()) |
1108 | return -EINVAL; |
1109 | |
1110 | /* Header */ |
1111 | if (count < sizeof(header)) |
1112 | return -EINVAL; |
1113 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, |
1114 | 0, sizeof(header)); |
1115 | if (ret) |
1116 | goto out; |
1117 | |
1118 | /* |
1119 | * All current ZA_PT_* flags are consumed by |
1120 | * vec_set_vector_length(), which will also validate them for |
1121 | * us: |
1122 | */ |
1123 | ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl, |
1124 | ((unsigned long)header.flags) << 16); |
1125 | if (ret) |
1126 | goto out; |
1127 | |
1128 | /* Actual VL set may be less than the user asked for: */ |
1129 | vq = sve_vq_from_vl(task_get_sme_vl(target)); |
1130 | |
1131 | /* Ensure there is some SVE storage for streaming mode */ |
1132 | if (!target->thread.sve_state) { |
1133 | sve_alloc(target, false); |
1134 | if (!target->thread.sve_state) { |
1135 | ret = -ENOMEM; |
1136 | goto out; |
1137 | } |
1138 | } |
1139 | |
1140 | /* |
1141 | * Only flush the storage if PSTATE.ZA was not already set, |
1142 | * otherwise preserve any existing data. |
1143 | */ |
1144 | sme_alloc(target, !thread_za_enabled(&target->thread)); |
1145 | if (!target->thread.sme_state) |
1146 | return -ENOMEM; |
1147 | |
1148 | /* If there is no data then disable ZA */ |
1149 | if (!count) { |
1150 | target->thread.svcr &= ~SVCR_ZA_MASK; |
1151 | goto out; |
1152 | } |
1153 | |
1154 | /* |
1155 | * If setting a different VL from the requested VL and there is |
1156 | * register data, the data layout will be wrong: don't even |
1157 | * try to set the registers in this case. |
1158 | */ |
1159 | if (vq != sve_vq_from_vl(header.vl)) { |
1160 | ret = -EIO; |
1161 | goto out; |
1162 | } |
1163 | |
1164 | BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header)); |
1165 | start = ZA_PT_ZA_OFFSET; |
1166 | end = ZA_PT_SIZE(vq); |
1167 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
1168 | target->thread.sme_state, |
1169 | start, end); |
1170 | if (ret) |
1171 | goto out; |
1172 | |
1173 | /* Mark ZA as active and let userspace use it */ |
1174 | set_tsk_thread_flag(target, TIF_SME); |
1175 | target->thread.svcr |= SVCR_ZA_MASK; |
1176 | |
1177 | out: |
1178 | fpsimd_flush_task_state(target); |
1179 | return ret; |
1180 | } |
1181 | |
1182 | static int zt_get(struct task_struct *target, |
1183 | const struct user_regset *regset, |
1184 | struct membuf to) |
1185 | { |
1186 | if (!system_supports_sme2()) |
1187 | return -EINVAL; |
1188 | |
1189 | /* |
1190 | * If PSTATE.ZA is not set then ZT will be zeroed when it is |
1191 | * enabled so report the current register value as zero. |
1192 | */ |
1193 | if (thread_za_enabled(&target->thread)) |
1194 | membuf_write(&to, thread_zt_state(&target->thread), |
1195 | ZT_SIG_REG_BYTES); |
1196 | else |
1197 | membuf_zero(&to, ZT_SIG_REG_BYTES); |
1198 | |
1199 | return 0; |
1200 | } |
1201 | |
1202 | static int zt_set(struct task_struct *target, |
1203 | const struct user_regset *regset, |
1204 | unsigned int pos, unsigned int count, |
1205 | const void *kbuf, const void __user *ubuf) |
1206 | { |
1207 | int ret; |
1208 | |
1209 | if (!system_supports_sme2()) |
1210 | return -EINVAL; |
1211 | |
1212 | /* Ensure SVE storage in case this is first use of SME */ |
1213 | sve_alloc(target, false); |
1214 | if (!target->thread.sve_state) |
1215 | return -ENOMEM; |
1216 | |
1217 | if (!thread_za_enabled(&target->thread)) { |
1218 | sme_alloc(target, true); |
1219 | if (!target->thread.sme_state) |
1220 | return -ENOMEM; |
1221 | } |
1222 | |
1223 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
1224 | thread_zt_state(&target->thread), |
1225 | 0, ZT_SIG_REG_BYTES); |
1226 | if (ret == 0) { |
1227 | target->thread.svcr |= SVCR_ZA_MASK; |
1228 | set_tsk_thread_flag(target, TIF_SME); |
1229 | } |
1230 | |
1231 | fpsimd_flush_task_state(target); |
1232 | |
1233 | return ret; |
1234 | } |
1235 | |
1236 | #endif /* CONFIG_ARM64_SME */ |
1237 | |
1238 | #ifdef CONFIG_ARM64_PTR_AUTH |
1239 | static int pac_mask_get(struct task_struct *target, |
1240 | const struct user_regset *regset, |
1241 | struct membuf to) |
1242 | { |
1243 | /* |
1244 | * The PAC bits can differ across data and instruction pointers |
1245 | * depending on TCR_EL1.TBID*, which we may make use of in future, so |
1246 | * we expose separate masks. |
1247 | */ |
1248 | unsigned long mask = ptrauth_user_pac_mask(); |
1249 | struct user_pac_mask uregs = { |
1250 | .data_mask = mask, |
1251 | .insn_mask = mask, |
1252 | }; |
1253 | |
1254 | if (!system_supports_address_auth()) |
1255 | return -EINVAL; |
1256 | |
1257 | return membuf_write(&to, &uregs, sizeof(uregs)); |
1258 | } |
1259 | |
1260 | static int pac_enabled_keys_get(struct task_struct *target, |
1261 | const struct user_regset *regset, |
1262 | struct membuf to) |
1263 | { |
1264 | long enabled_keys = ptrauth_get_enabled_keys(target); |
1265 | |
1266 | if (IS_ERR_VALUE(enabled_keys)) |
1267 | return enabled_keys; |
1268 | |
1269 | return membuf_write(&to, &enabled_keys, sizeof(enabled_keys)); |
1270 | } |
1271 | |
1272 | static int pac_enabled_keys_set(struct task_struct *target, |
1273 | const struct user_regset *regset, |
1274 | unsigned int pos, unsigned int count, |
1275 | const void *kbuf, const void __user *ubuf) |
1276 | { |
1277 | int ret; |
1278 | long enabled_keys = ptrauth_get_enabled_keys(target); |
1279 | |
1280 | if (IS_ERR_VALUE(enabled_keys)) |
1281 | return enabled_keys; |
1282 | |
1283 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0, |
1284 | sizeof(long)); |
1285 | if (ret) |
1286 | return ret; |
1287 | |
1288 | return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK, |
1289 | enabled_keys); |
1290 | } |
1291 | |
1292 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1293 | static __uint128_t pac_key_to_user(const struct ptrauth_key *key) |
1294 | { |
1295 | return (__uint128_t)key->hi << 64 | key->lo; |
1296 | } |
1297 | |
1298 | static struct ptrauth_key pac_key_from_user(__uint128_t ukey) |
1299 | { |
1300 | struct ptrauth_key key = { |
1301 | .lo = (unsigned long)ukey, |
1302 | .hi = (unsigned long)(ukey >> 64), |
1303 | }; |
1304 | |
1305 | return key; |
1306 | } |
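
/*
 * Round-trip sketch: the high and low 64-bit halves of the user-visible
 * __uint128_t map onto ptrauth_key.hi and ptrauth_key.lo, so
 *
 *	struct ptrauth_key k2 = pac_key_from_user(pac_key_to_user(&key));
 *
 * yields k2.lo == key.lo and k2.hi == key.hi, making GETREGSET followed by
 * SETREGSET idempotent for the PAC key regsets.
 */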
1307 | |
1308 | static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, |
1309 | const struct ptrauth_keys_user *keys) |
1310 | { |
1311 | ukeys->apiakey = pac_key_to_user(&keys->apia); |
1312 | ukeys->apibkey = pac_key_to_user(&keys->apib); |
1313 | ukeys->apdakey = pac_key_to_user(&keys->apda); |
1314 | ukeys->apdbkey = pac_key_to_user(&keys->apdb); |
1315 | } |
1316 | |
1317 | static void pac_address_keys_from_user(struct ptrauth_keys_user *keys, |
1318 | const struct user_pac_address_keys *ukeys) |
1319 | { |
1320 | keys->apia = pac_key_from_user(ukeys->apiakey); |
1321 | keys->apib = pac_key_from_user(ukeys->apibkey); |
1322 | keys->apda = pac_key_from_user(ukeys->apdakey); |
1323 | keys->apdb = pac_key_from_user(ukeys->apdbkey); |
1324 | } |
1325 | |
1326 | static int pac_address_keys_get(struct task_struct *target, |
1327 | const struct user_regset *regset, |
1328 | struct membuf to) |
1329 | { |
1330 | struct ptrauth_keys_user *keys = &target->thread.keys_user; |
1331 | struct user_pac_address_keys user_keys; |
1332 | |
1333 | if (!system_supports_address_auth()) |
1334 | return -EINVAL; |
1335 | |
1336 | pac_address_keys_to_user(&user_keys, keys); |
1337 | |
1338 | return membuf_write(&to, &user_keys, sizeof(user_keys)); |
1339 | } |
1340 | |
1341 | static int pac_address_keys_set(struct task_struct *target, |
1342 | const struct user_regset *regset, |
1343 | unsigned int pos, unsigned int count, |
1344 | const void *kbuf, const void __user *ubuf) |
1345 | { |
1346 | struct ptrauth_keys_user *keys = &target->thread.keys_user; |
1347 | struct user_pac_address_keys user_keys; |
1348 | int ret; |
1349 | |
1350 | if (!system_supports_address_auth()) |
1351 | return -EINVAL; |
1352 | |
1353 | pac_address_keys_to_user(&user_keys, keys); |
1354 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
1355 | &user_keys, 0, -1); |
1356 | if (ret) |
1357 | return ret; |
1358 | pac_address_keys_from_user(keys, &user_keys); |
1359 | |
1360 | return 0; |
1361 | } |
1362 | |
1363 | static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, |
1364 | const struct ptrauth_keys_user *keys) |
1365 | { |
1366 | ukeys->apgakey = pac_key_to_user(&keys->apga); |
1367 | } |
1368 | |
1369 | static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys, |
1370 | const struct user_pac_generic_keys *ukeys) |
1371 | { |
1372 | keys->apga = pac_key_from_user(ukeys->apgakey); |
1373 | } |
1374 | |
1375 | static int pac_generic_keys_get(struct task_struct *target, |
1376 | const struct user_regset *regset, |
1377 | struct membuf to) |
1378 | { |
1379 | struct ptrauth_keys_user *keys = &target->thread.keys_user; |
1380 | struct user_pac_generic_keys user_keys; |
1381 | |
1382 | if (!system_supports_generic_auth()) |
1383 | return -EINVAL; |
1384 | |
1385 | pac_generic_keys_to_user(&user_keys, keys); |
1386 | |
1387 | return membuf_write(&to, &user_keys, sizeof(user_keys)); |
1388 | } |
1389 | |
1390 | static int pac_generic_keys_set(struct task_struct *target, |
1391 | const struct user_regset *regset, |
1392 | unsigned int pos, unsigned int count, |
1393 | const void *kbuf, const void __user *ubuf) |
1394 | { |
1395 | struct ptrauth_keys_user *keys = &target->thread.keys_user; |
1396 | struct user_pac_generic_keys user_keys; |
1397 | int ret; |
1398 | |
1399 | if (!system_supports_generic_auth()) |
1400 | return -EINVAL; |
1401 | |
1402 | pac_generic_keys_to_user(&user_keys, keys); |
1403 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
1404 | &user_keys, 0, -1); |
1405 | if (ret) |
1406 | return ret; |
1407 | pac_generic_keys_from_user(keys, &user_keys); |
1408 | |
1409 | return 0; |
1410 | } |
1411 | #endif /* CONFIG_CHECKPOINT_RESTORE */ |
1412 | #endif /* CONFIG_ARM64_PTR_AUTH */ |
1413 | |
1414 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI |
1415 | static int tagged_addr_ctrl_get(struct task_struct *target, |
1416 | const struct user_regset *regset, |
1417 | struct membuf to) |
1418 | { |
1419 | long ctrl = get_tagged_addr_ctrl(target); |
1420 | |
1421 | if (IS_ERR_VALUE(ctrl)) |
1422 | return ctrl; |
1423 | |
1424 | return membuf_write(&to, &ctrl, sizeof(ctrl)); |
1425 | } |
1426 | |
1427 | static int tagged_addr_ctrl_set(struct task_struct *target, const struct |
1428 | user_regset *regset, unsigned int pos, |
1429 | unsigned int count, const void *kbuf, const |
1430 | void __user *ubuf) |
1431 | { |
1432 | int ret; |
1433 | long ctrl; |
1434 | |
1435 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); |
1436 | if (ret) |
1437 | return ret; |
1438 | |
1439 | return set_tagged_addr_ctrl(target, ctrl); |
1440 | } |
1441 | #endif |
1442 | |
1443 | enum aarch64_regset { |
1444 | REGSET_GPR, |
1445 | REGSET_FPR, |
1446 | REGSET_TLS, |
1447 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1448 | REGSET_HW_BREAK, |
1449 | REGSET_HW_WATCH, |
1450 | #endif |
1451 | REGSET_FPMR, |
1452 | REGSET_SYSTEM_CALL, |
1453 | #ifdef CONFIG_ARM64_SVE |
1454 | REGSET_SVE, |
1455 | #endif |
1456 | #ifdef CONFIG_ARM64_SME |
1457 | REGSET_SSVE, |
1458 | REGSET_ZA, |
1459 | REGSET_ZT, |
1460 | #endif |
1461 | #ifdef CONFIG_ARM64_PTR_AUTH |
1462 | REGSET_PAC_MASK, |
1463 | REGSET_PAC_ENABLED_KEYS, |
1464 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1465 | REGSET_PACA_KEYS, |
1466 | REGSET_PACG_KEYS, |
1467 | #endif |
1468 | #endif |
1469 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI |
1470 | REGSET_TAGGED_ADDR_CTRL, |
1471 | #endif |
1472 | }; |
1473 | |
1474 | static const struct user_regset aarch64_regsets[] = { |
1475 | [REGSET_GPR] = { |
1476 | .core_note_type = NT_PRSTATUS, |
1477 | .n = sizeof(struct user_pt_regs) / sizeof(u64), |
1478 | .size = sizeof(u64), |
1479 | .align = sizeof(u64), |
1480 | .regset_get = gpr_get, |
1481 | .set = gpr_set |
1482 | }, |
1483 | [REGSET_FPR] = { |
1484 | .core_note_type = NT_PRFPREG, |
1485 | .n = sizeof(struct user_fpsimd_state) / sizeof(u32), |
1486 | /* |
1487 | * We pretend we have 32-bit registers because the fpsr and |
		 * fpcr are 32 bits wide.
1489 | */ |
1490 | .size = sizeof(u32), |
1491 | .align = sizeof(u32), |
1492 | .active = fpr_active, |
1493 | .regset_get = fpr_get, |
1494 | .set = fpr_set |
1495 | }, |
1496 | [REGSET_TLS] = { |
1497 | .core_note_type = NT_ARM_TLS, |
1498 | .n = 2, |
1499 | .size = sizeof(void *), |
1500 | .align = sizeof(void *), |
1501 | .regset_get = tls_get, |
1502 | .set = tls_set, |
1503 | }, |
1504 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1505 | [REGSET_HW_BREAK] = { |
1506 | .core_note_type = NT_ARM_HW_BREAK, |
1507 | .n = sizeof(struct user_hwdebug_state) / sizeof(u32), |
1508 | .size = sizeof(u32), |
1509 | .align = sizeof(u32), |
1510 | .regset_get = hw_break_get, |
1511 | .set = hw_break_set, |
1512 | }, |
1513 | [REGSET_HW_WATCH] = { |
1514 | .core_note_type = NT_ARM_HW_WATCH, |
1515 | .n = sizeof(struct user_hwdebug_state) / sizeof(u32), |
1516 | .size = sizeof(u32), |
1517 | .align = sizeof(u32), |
1518 | .regset_get = hw_break_get, |
1519 | .set = hw_break_set, |
1520 | }, |
1521 | #endif |
1522 | [REGSET_SYSTEM_CALL] = { |
1523 | .core_note_type = NT_ARM_SYSTEM_CALL, |
1524 | .n = 1, |
1525 | .size = sizeof(int), |
1526 | .align = sizeof(int), |
1527 | .regset_get = system_call_get, |
1528 | .set = system_call_set, |
1529 | }, |
1530 | [REGSET_FPMR] = { |
1531 | .core_note_type = NT_ARM_FPMR, |
1532 | .n = 1, |
1533 | .size = sizeof(u64), |
1534 | .align = sizeof(u64), |
1535 | .regset_get = fpmr_get, |
1536 | .set = fpmr_set, |
1537 | }, |
1538 | #ifdef CONFIG_ARM64_SVE |
1539 | [REGSET_SVE] = { /* Scalable Vector Extension */ |
1540 | .core_note_type = NT_ARM_SVE, |
1541 | .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX, |
1542 | SVE_PT_REGS_SVE), |
1543 | SVE_VQ_BYTES), |
1544 | .size = SVE_VQ_BYTES, |
1545 | .align = SVE_VQ_BYTES, |
1546 | .regset_get = sve_get, |
1547 | .set = sve_set, |
1548 | }, |
1549 | #endif |
1550 | #ifdef CONFIG_ARM64_SME |
1551 | [REGSET_SSVE] = { /* Streaming mode SVE */ |
1552 | .core_note_type = NT_ARM_SSVE, |
1553 | .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE), |
1554 | SVE_VQ_BYTES), |
1555 | .size = SVE_VQ_BYTES, |
1556 | .align = SVE_VQ_BYTES, |
1557 | .regset_get = ssve_get, |
1558 | .set = ssve_set, |
1559 | }, |
1560 | [REGSET_ZA] = { /* SME ZA */ |
1561 | .core_note_type = NT_ARM_ZA, |
1562 | /* |
1563 | * ZA is a single register but it's variably sized and |
1564 | * the ptrace core requires that the size of any data |
1565 | * be an exact multiple of the configured register |
1566 | * size so report as though we had SVE_VQ_BYTES |
1567 | * registers. These values aren't exposed to |
1568 | * userspace. |
1569 | */ |
1570 | .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES), |
1571 | .size = SVE_VQ_BYTES, |
1572 | .align = SVE_VQ_BYTES, |
1573 | .regset_get = za_get, |
1574 | .set = za_set, |
1575 | }, |
1576 | [REGSET_ZT] = { /* SME ZT */ |
1577 | .core_note_type = NT_ARM_ZT, |
1578 | .n = 1, |
1579 | .size = ZT_SIG_REG_BYTES, |
1580 | .align = sizeof(u64), |
1581 | .regset_get = zt_get, |
1582 | .set = zt_set, |
1583 | }, |
1584 | #endif |
1585 | #ifdef CONFIG_ARM64_PTR_AUTH |
1586 | [REGSET_PAC_MASK] = { |
1587 | .core_note_type = NT_ARM_PAC_MASK, |
1588 | .n = sizeof(struct user_pac_mask) / sizeof(u64), |
1589 | .size = sizeof(u64), |
1590 | .align = sizeof(u64), |
1591 | .regset_get = pac_mask_get, |
1592 | /* this cannot be set dynamically */ |
1593 | }, |
1594 | [REGSET_PAC_ENABLED_KEYS] = { |
1595 | .core_note_type = NT_ARM_PAC_ENABLED_KEYS, |
1596 | .n = 1, |
1597 | .size = sizeof(long), |
1598 | .align = sizeof(long), |
1599 | .regset_get = pac_enabled_keys_get, |
1600 | .set = pac_enabled_keys_set, |
1601 | }, |
1602 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1603 | [REGSET_PACA_KEYS] = { |
1604 | .core_note_type = NT_ARM_PACA_KEYS, |
1605 | .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), |
1606 | .size = sizeof(__uint128_t), |
1607 | .align = sizeof(__uint128_t), |
1608 | .regset_get = pac_address_keys_get, |
1609 | .set = pac_address_keys_set, |
1610 | }, |
1611 | [REGSET_PACG_KEYS] = { |
1612 | .core_note_type = NT_ARM_PACG_KEYS, |
1613 | .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), |
1614 | .size = sizeof(__uint128_t), |
1615 | .align = sizeof(__uint128_t), |
1616 | .regset_get = pac_generic_keys_get, |
1617 | .set = pac_generic_keys_set, |
1618 | }, |
1619 | #endif |
1620 | #endif |
1621 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI |
1622 | [REGSET_TAGGED_ADDR_CTRL] = { |
1623 | .core_note_type = NT_ARM_TAGGED_ADDR_CTRL, |
1624 | .n = 1, |
1625 | .size = sizeof(long), |
1626 | .align = sizeof(long), |
1627 | .regset_get = tagged_addr_ctrl_get, |
1628 | .set = tagged_addr_ctrl_set, |
1629 | }, |
1630 | #endif |
1631 | }; |
1632 | |
1633 | static const struct user_regset_view user_aarch64_view = { |
1634 | .name = "aarch64" , .e_machine = EM_AARCH64, |
1635 | .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets) |
1636 | }; |
1637 | |
1638 | enum compat_regset { |
1639 | REGSET_COMPAT_GPR, |
1640 | REGSET_COMPAT_VFP, |
1641 | }; |
1642 | |
1643 | static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx) |
1644 | { |
1645 | struct pt_regs *regs = task_pt_regs(task); |
1646 | |
1647 | switch (idx) { |
1648 | case 15: |
1649 | return regs->pc; |
1650 | case 16: |
1651 | return pstate_to_compat_psr(regs->pstate); |
1652 | case 17: |
1653 | return regs->orig_x0; |
1654 | default: |
1655 | return regs->regs[idx]; |
1656 | } |
1657 | } |
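
/*
 * Index mapping (sketch): for an AArch32 tracee, indices 0-14 map straight
 * onto regs->regs[0..14], 15 is the PC, 16 is the CPSR view derived from
 * PSTATE and 17 is orig_x0, mirroring compat_elf_gregset_t. A 32-bit
 * debugger's PTRACE_PEEKUSR at offset 4 * 15, for example, reaches this
 * helper with idx == 15 and reads back the PC.
 */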
1658 | |
1659 | static int compat_gpr_get(struct task_struct *target, |
1660 | const struct user_regset *regset, |
1661 | struct membuf to) |
1662 | { |
1663 | int i = 0; |
1664 | |
1665 | while (to.left) |
1666 | membuf_store(&to, compat_get_user_reg(target, i++)); |
1667 | return 0; |
1668 | } |
1669 | |
1670 | static int compat_gpr_set(struct task_struct *target, |
1671 | const struct user_regset *regset, |
1672 | unsigned int pos, unsigned int count, |
1673 | const void *kbuf, const void __user *ubuf) |
1674 | { |
1675 | struct pt_regs newregs; |
1676 | int ret = 0; |
1677 | unsigned int i, start, num_regs; |
1678 | |
1679 | /* Calculate the number of AArch32 registers contained in count */ |
1680 | num_regs = count / regset->size; |
1681 | |
	/* Convert pos into a register number */
1683 | start = pos / regset->size; |
1684 | |
1685 | if (start + num_regs > regset->n) |
1686 | return -EIO; |
1687 | |
1688 | newregs = *task_pt_regs(target); |
1689 | |
1690 | for (i = 0; i < num_regs; ++i) { |
1691 | unsigned int idx = start + i; |
1692 | compat_ulong_t reg; |
1693 | |
1694 | if (kbuf) { |
1695 | memcpy(®, kbuf, sizeof(reg)); |
1696 | kbuf += sizeof(reg); |
1697 | } else { |
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
1699 | if (ret) { |
1700 | ret = -EFAULT; |
1701 | break; |
1702 | } |
1703 | |
1704 | ubuf += sizeof(reg); |
1705 | } |
1706 | |
1707 | switch (idx) { |
1708 | case 15: |
1709 | newregs.pc = reg; |
1710 | break; |
1711 | case 16: |
1712 | reg = compat_psr_to_pstate(reg); |
1713 | newregs.pstate = reg; |
1714 | break; |
1715 | case 17: |
1716 | newregs.orig_x0 = reg; |
1717 | break; |
1718 | default: |
1719 | newregs.regs[idx] = reg; |
1720 | } |
1721 | |
1722 | } |
1723 | |
1724 | if (valid_user_regs(&newregs.user_regs, target)) |
1725 | *task_pt_regs(target) = newregs; |
1726 | else |
1727 | ret = -EINVAL; |
1728 | |
1729 | return ret; |
1730 | } |
1731 | |
1732 | static int compat_vfp_get(struct task_struct *target, |
1733 | const struct user_regset *regset, |
1734 | struct membuf to) |
1735 | { |
1736 | struct user_fpsimd_state *uregs; |
1737 | compat_ulong_t fpscr; |
1738 | |
1739 | if (!system_supports_fpsimd()) |
1740 | return -EINVAL; |
1741 | |
1742 | uregs = &target->thread.uw.fpsimd_state; |
1743 | |
1744 | if (target == current) |
1745 | fpsimd_preserve_current_state(); |
1746 | |
1747 | /* |
1748 | * The VFP registers are packed into the fpsimd_state, so they all sit |
1749 | * nicely together for us. We just need to create the fpscr separately. |
1750 | */ |
1751 | membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t)); |
1752 | fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | |
1753 | (uregs->fpcr & VFP_FPSCR_CTRL_MASK); |
1754 | return membuf_store(&to, fpscr); |
1755 | } |
1756 | |
1757 | static int compat_vfp_set(struct task_struct *target, |
1758 | const struct user_regset *regset, |
1759 | unsigned int pos, unsigned int count, |
1760 | const void *kbuf, const void __user *ubuf) |
1761 | { |
1762 | struct user_fpsimd_state *uregs; |
1763 | compat_ulong_t fpscr; |
1764 | int ret, vregs_end_pos; |
1765 | |
1766 | if (!system_supports_fpsimd()) |
1767 | return -EINVAL; |
1768 | |
1769 | uregs = &target->thread.uw.fpsimd_state; |
1770 | |
1771 | vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);
1774 | |
1775 | if (count && !ret) { |
1776 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr, |
1777 | vregs_end_pos, VFP_STATE_SIZE); |
1778 | if (!ret) { |
1779 | uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; |
1780 | uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; |
1781 | } |
1782 | } |
1783 | |
1784 | fpsimd_flush_task_state(target); |
1785 | return ret; |
1786 | } |
1787 | |
1788 | static int compat_tls_get(struct task_struct *target, |
1789 | const struct user_regset *regset, |
1790 | struct membuf to) |
1791 | { |
1792 | return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value); |
1793 | } |
1794 | |
1795 | static int compat_tls_set(struct task_struct *target, |
1796 | const struct user_regset *regset, unsigned int pos, |
1797 | unsigned int count, const void *kbuf, |
1798 | const void __user *ubuf) |
1799 | { |
1800 | int ret; |
1801 | compat_ulong_t tls = target->thread.uw.tp_value; |
1802 | |
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1804 | if (ret) |
1805 | return ret; |
1806 | |
1807 | target->thread.uw.tp_value = tls; |
1808 | return ret; |
1809 | } |
1810 | |
1811 | static const struct user_regset aarch32_regsets[] = { |
1812 | [REGSET_COMPAT_GPR] = { |
1813 | .core_note_type = NT_PRSTATUS, |
1814 | .n = COMPAT_ELF_NGREG, |
1815 | .size = sizeof(compat_elf_greg_t), |
1816 | .align = sizeof(compat_elf_greg_t), |
1817 | .regset_get = compat_gpr_get, |
1818 | .set = compat_gpr_set |
1819 | }, |
1820 | [REGSET_COMPAT_VFP] = { |
1821 | .core_note_type = NT_ARM_VFP, |
1822 | .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), |
1823 | .size = sizeof(compat_ulong_t), |
1824 | .align = sizeof(compat_ulong_t), |
1825 | .active = fpr_active, |
1826 | .regset_get = compat_vfp_get, |
1827 | .set = compat_vfp_set |
1828 | }, |
1829 | }; |
1830 | |
1831 | static const struct user_regset_view user_aarch32_view = { |
1832 | .name = "aarch32" , .e_machine = EM_ARM, |
1833 | .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) |
1834 | }; |
1835 | |
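/*
 * Extended regset list used when a native (64-bit) tracer operates on a
 * 32-bit child: on top of the GPR/VFP regsets above it also exposes the
 * TLS, hardware breakpoint/watchpoint and syscall-number regsets.
 */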
1836 | static const struct user_regset aarch32_ptrace_regsets[] = { |
1837 | [REGSET_GPR] = { |
1838 | .core_note_type = NT_PRSTATUS, |
1839 | .n = COMPAT_ELF_NGREG, |
1840 | .size = sizeof(compat_elf_greg_t), |
1841 | .align = sizeof(compat_elf_greg_t), |
1842 | .regset_get = compat_gpr_get, |
1843 | .set = compat_gpr_set |
1844 | }, |
1845 | [REGSET_FPR] = { |
1846 | .core_note_type = NT_ARM_VFP, |
1847 | .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), |
1848 | .size = sizeof(compat_ulong_t), |
1849 | .align = sizeof(compat_ulong_t), |
1850 | .regset_get = compat_vfp_get, |
1851 | .set = compat_vfp_set |
1852 | }, |
1853 | [REGSET_TLS] = { |
1854 | .core_note_type = NT_ARM_TLS, |
1855 | .n = 1, |
1856 | .size = sizeof(compat_ulong_t), |
1857 | .align = sizeof(compat_ulong_t), |
1858 | .regset_get = compat_tls_get, |
1859 | .set = compat_tls_set, |
1860 | }, |
1861 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1862 | [REGSET_HW_BREAK] = { |
1863 | .core_note_type = NT_ARM_HW_BREAK, |
1864 | .n = sizeof(struct user_hwdebug_state) / sizeof(u32), |
1865 | .size = sizeof(u32), |
1866 | .align = sizeof(u32), |
1867 | .regset_get = hw_break_get, |
1868 | .set = hw_break_set, |
1869 | }, |
1870 | [REGSET_HW_WATCH] = { |
1871 | .core_note_type = NT_ARM_HW_WATCH, |
1872 | .n = sizeof(struct user_hwdebug_state) / sizeof(u32), |
1873 | .size = sizeof(u32), |
1874 | .align = sizeof(u32), |
1875 | .regset_get = hw_break_get, |
1876 | .set = hw_break_set, |
1877 | }, |
1878 | #endif |
1879 | [REGSET_SYSTEM_CALL] = { |
1880 | .core_note_type = NT_ARM_SYSTEM_CALL, |
1881 | .n = 1, |
1882 | .size = sizeof(int), |
1883 | .align = sizeof(int), |
1884 | .regset_get = system_call_get, |
1885 | .set = system_call_set, |
1886 | }, |
1887 | }; |
1888 | |
1889 | static const struct user_regset_view user_aarch32_ptrace_view = { |
1890 | .name = "aarch32" , .e_machine = EM_ARM, |
1891 | .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets) |
1892 | }; |
1893 | |
1894 | #ifdef CONFIG_COMPAT |
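/*
 * PTRACE_PEEKUSR: 'off' is a byte offset into the AArch32 user area. Word
 * offsets 0-17 read the compat GPRs (15 = pc, 16 = pstate, 17 = orig_x0),
 * the COMPAT_PT_* offsets report the text/data segment addresses, and any
 * other offset below COMPAT_USER_SZ reads as zero.
 */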
1895 | static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, |
1896 | compat_ulong_t __user *ret) |
1897 | { |
1898 | compat_ulong_t tmp; |
1899 | |
1900 | if (off & 3) |
1901 | return -EIO; |
1902 | |
1903 | if (off == COMPAT_PT_TEXT_ADDR) |
1904 | tmp = tsk->mm->start_code; |
1905 | else if (off == COMPAT_PT_DATA_ADDR) |
1906 | tmp = tsk->mm->start_data; |
1907 | else if (off == COMPAT_PT_TEXT_END_ADDR) |
1908 | tmp = tsk->mm->end_code; |
1909 | else if (off < sizeof(compat_elf_gregset_t)) |
1910 | tmp = compat_get_user_reg(tsk, off >> 2); |
1911 | else if (off >= COMPAT_USER_SZ) |
1912 | return -EIO; |
1913 | else |
1914 | tmp = 0; |
1915 | |
1916 | return put_user(tmp, ret); |
1917 | } |
1918 | |
1919 | static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, |
1920 | compat_ulong_t val) |
1921 | { |
1922 | struct pt_regs newregs = *task_pt_regs(tsk); |
1923 | unsigned int idx = off / 4; |
1924 | |
1925 | if (off & 3 || off >= COMPAT_USER_SZ) |
1926 | return -EIO; |
1927 | |
1928 | if (off >= sizeof(compat_elf_gregset_t)) |
1929 | return 0; |
1930 | |
1931 | switch (idx) { |
1932 | case 15: |
1933 | newregs.pc = val; |
1934 | break; |
1935 | case 16: |
1936 | newregs.pstate = compat_psr_to_pstate(val); |
1937 | break; |
1938 | case 17: |
1939 | newregs.orig_x0 = val; |
1940 | break; |
1941 | default: |
1942 | newregs.regs[idx] = val; |
1943 | } |
1944 | |
1945 | if (!valid_user_regs(&newregs.user_regs, tsk)) |
1946 | return -EINVAL; |
1947 | |
1948 | *task_pt_regs(tsk) = newregs; |
1949 | return 0; |
1950 | } |
1951 | |
1952 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1953 | |
1954 | /* |
1955 | * Convert a virtual register number into an index for a thread_info |
1956 | * breakpoint array. Breakpoints are identified using positive numbers |
1957 | * whilst watchpoints are negative. The registers are laid out as pairs |
1958 | * of (address, control), each pair mapping to a unique hw_breakpoint struct. |
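* For example, virtual registers 1 and 2 are the address and control of
* breakpoint 0, while -1 and -2 are the address and control of watchpoint 0.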
1959 | * Register 0 is reserved for describing resource information. |
1960 | */ |
1961 | static int compat_ptrace_hbp_num_to_idx(compat_long_t num) |
1962 | { |
1963 | return (abs(num) - 1) >> 1; |
1964 | } |
1965 | |
1966 | static int compat_ptrace_hbp_get_resource_info(u32 *kdata) |
1967 | { |
1968 | u8 num_brps, num_wrps, debug_arch, wp_len; |
1969 | u32 reg = 0; |
1970 | |
1971 | num_brps = hw_breakpoint_slots(TYPE_INST); |
1972 | num_wrps = hw_breakpoint_slots(TYPE_DATA); |
1973 | |
1974 | debug_arch = debug_monitors_arch(); |
1975 | wp_len = 8; |
1976 | reg |= debug_arch; |
1977 | reg <<= 8; |
1978 | reg |= wp_len; |
1979 | reg <<= 8; |
1980 | reg |= num_wrps; |
1981 | reg <<= 8; |
1982 | reg |= num_brps; |
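/* reg now packs, from MSB to LSB: debug_arch, wp_len, num_wrps, num_brps. */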
1983 | |
1984 | *kdata = reg; |
1985 | return 0; |
1986 | } |
1987 | |
1988 | static int compat_ptrace_hbp_get(unsigned int note_type, |
1989 | struct task_struct *tsk, |
1990 | compat_long_t num, |
1991 | u32 *kdata) |
1992 | { |
1993 | u64 addr = 0; |
1994 | u32 ctrl = 0; |
1995 | |
1996 | int err, idx = compat_ptrace_hbp_num_to_idx(num); |
1997 | |
1998 | if (num & 1) { |
1999 | err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); |
2000 | *kdata = (u32)addr; |
2001 | } else { |
2002 | err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); |
2003 | *kdata = ctrl; |
2004 | } |
2005 | |
2006 | return err; |
2007 | } |
2008 | |
2009 | static int compat_ptrace_hbp_set(unsigned int note_type, |
2010 | struct task_struct *tsk, |
2011 | compat_long_t num, |
2012 | u32 *kdata) |
2013 | { |
2014 | u64 addr; |
2015 | u32 ctrl; |
2016 | |
2017 | int err, idx = compat_ptrace_hbp_num_to_idx(num); |
2018 | |
2019 | if (num & 1) { |
2020 | addr = *kdata; |
2021 | err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); |
2022 | } else { |
2023 | ctrl = *kdata; |
2024 | err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); |
2025 | } |
2026 | |
2027 | return err; |
2028 | } |
2029 | |
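/*
 * Illustrative sketch only (not part of this file): from a 32-bit tracer's
 * point of view, fetching the resource-information word could look like the
 * following, using the AArch32 PTRACE_GETHBPREGS request with register
 * number 0 and ignoring error handling:
 *
 *	u32 info;
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *	// info[31:24] debug arch, [23:16] max watchpoint length,
 *	// [15:8] number of watchpoints, [7:0] number of breakpoints
 */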
2030 | static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, |
2031 | compat_ulong_t __user *data) |
2032 | { |
2033 | int ret; |
2034 | u32 kdata; |
2035 | |
2036 | /* Watchpoint */ |
2037 | if (num < 0) { |
2038 | ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); |
2039 | /* Resource info */ |
2040 | } else if (num == 0) { |
2041 | ret = compat_ptrace_hbp_get_resource_info(&kdata); |
2042 | /* Breakpoint */ |
2043 | } else { |
2044 | ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); |
2045 | } |
2046 | |
2047 | if (!ret) |
2048 | ret = put_user(kdata, data); |
2049 | |
2050 | return ret; |
2051 | } |
2052 | |
2053 | static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, |
2054 | compat_ulong_t __user *data) |
2055 | { |
2056 | int ret; |
2057 | u32 kdata = 0; |
2058 | |
2059 | if (num == 0) |
2060 | return 0; |
2061 | |
2062 | ret = get_user(kdata, data); |
2063 | if (ret) |
2064 | return ret; |
2065 | |
2066 | if (num < 0) |
2067 | ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); |
2068 | else |
2069 | ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); |
2070 | |
2071 | return ret; |
2072 | } |
2073 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
2074 | |
2075 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, |
2076 | compat_ulong_t caddr, compat_ulong_t cdata) |
2077 | { |
2078 | unsigned long addr = caddr; |
2079 | unsigned long data = cdata; |
2080 | void __user *datap = compat_ptr(data); |
2081 | int ret; |
2082 | |
2083 | switch (request) { |
2084 | case PTRACE_PEEKUSR: |
2085 | ret = compat_ptrace_read_user(child, addr, datap); |
2086 | break; |
2087 | |
2088 | case PTRACE_POKEUSR: |
2089 | ret = compat_ptrace_write_user(child, addr, data); |
2090 | break; |
2091 | |
2092 | case COMPAT_PTRACE_GETREGS: |
2093 | ret = copy_regset_to_user(child, |
2094 | &user_aarch32_view, |
2095 | REGSET_COMPAT_GPR, |
2096 | 0, sizeof(compat_elf_gregset_t), |
2097 | datap); |
2098 | break; |
2099 | |
2100 | case COMPAT_PTRACE_SETREGS: |
2101 | ret = copy_regset_from_user(child, |
2102 | &user_aarch32_view, |
2103 | REGSET_COMPAT_GPR, |
2104 | 0, sizeof(compat_elf_gregset_t), |
2105 | datap); |
2106 | break; |
2107 | |
2108 | case COMPAT_PTRACE_GET_THREAD_AREA: |
2109 | ret = put_user((compat_ulong_t)child->thread.uw.tp_value, |
2110 | (compat_ulong_t __user *)datap); |
2111 | break; |
2112 | |
2113 | case COMPAT_PTRACE_SET_SYSCALL: |
2114 | task_pt_regs(child)->syscallno = data; |
2115 | ret = 0; |
2116 | break; |
2117 | |
2118 | case COMPAT_PTRACE_GETVFPREGS: |
2119 | ret = copy_regset_to_user(child, |
2120 | &user_aarch32_view, |
2121 | REGSET_COMPAT_VFP, |
2122 | 0, VFP_STATE_SIZE, |
2123 | datap); |
2124 | break; |
2125 | |
2126 | case COMPAT_PTRACE_SETVFPREGS: |
2127 | ret = copy_regset_from_user(child, |
2128 | &user_aarch32_view, |
2129 | REGSET_COMPAT_VFP, |
2130 | 0, VFP_STATE_SIZE, |
2131 | datap); |
2132 | break; |
2133 | |
2134 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
2135 | case COMPAT_PTRACE_GETHBPREGS: |
2136 | ret = compat_ptrace_gethbpregs(child, addr, datap); |
2137 | break; |
2138 | |
2139 | case COMPAT_PTRACE_SETHBPREGS: |
2140 | ret = compat_ptrace_sethbpregs(child, addr, datap); |
2141 | break; |
2142 | #endif |
2143 | |
2144 | default: |
2145 | ret = compat_ptrace_request(child, request, addr, |
2146 | data); |
2147 | break; |
2148 | } |
2149 | |
2150 | return ret; |
2151 | } |
2152 | #endif /* CONFIG_COMPAT */ |
2153 | |
2154 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) |
2155 | { |
2156 | /* |
2157 | * Core dumping of 32-bit tasks or compat ptrace requests must use the |
2158 | * user_aarch32_view compatible with arm32. Native ptrace requests on |
2159 | * 32-bit children use an extended user_aarch32_ptrace_view to allow |
2160 | * access to the TLS register. |
2161 | */ |
2162 | if (is_compat_task()) |
2163 | return &user_aarch32_view; |
2164 | else if (is_compat_thread(task_thread_info(task))) |
2165 | return &user_aarch32_ptrace_view; |
2166 | |
2167 | return &user_aarch64_view; |
2168 | } |
2169 | |
2170 | long arch_ptrace(struct task_struct *child, long request, |
2171 | unsigned long addr, unsigned long data) |
2172 | { |
2173 | switch (request) { |
2174 | case PTRACE_PEEKMTETAGS: |
2175 | case PTRACE_POKEMTETAGS: |
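/* Transfer MTE allocation tags between the tracer and the tracee. */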
2176 | return mte_ptrace_copy_tags(child, request, addr, data); |
2177 | } |
2178 | |
2179 | return ptrace_request(child, request, addr, data); |
2180 | } |
2181 | |
2182 | enum ptrace_syscall_dir { |
2183 | PTRACE_SYSCALL_ENTER = 0, |
2184 | PTRACE_SYSCALL_EXIT, |
2185 | }; |
2186 | |
2187 | static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) |
2188 | { |
2189 | int regno; |
2190 | unsigned long saved_reg; |
2191 | |
2192 | /* |
2193 | * We have some ABI weirdness here in the way that we handle syscall |
2194 | * exit stops because we indicate whether or not the stop has been |
2195 | * signalled from syscall entry or syscall exit by clobbering a general |
2196 | * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee |
2197 | * and restoring its old value after the stop. This means that: |
2198 | * |
2199 | * - Any writes by the tracer to this register during the stop are |
2200 | * ignored/discarded. |
2201 | * |
2202 | * - The actual value of the register is not available during the stop, |
2203 | * so the tracer cannot save it and restore it later. |
2204 | * |
2205 | * - Syscall stops behave differently to seccomp and pseudo-step traps |
2206 | * (the latter do not nobble any registers). |
2207 | */ |
2208 | regno = (is_compat_task() ? 12 : 7); |
2209 | saved_reg = regs->regs[regno]; |
2210 | regs->regs[regno] = dir; |
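/* The tracer observes 0 (syscall entry) or 1 (syscall exit) in this register. */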
2211 | |
2212 | if (dir == PTRACE_SYSCALL_ENTER) { |
2213 | if (ptrace_report_syscall_entry(regs)) |
2214 | forget_syscall(regs); |
2215 | regs->regs[regno] = saved_reg; |
2216 | } else if (!test_thread_flag(TIF_SINGLESTEP)) { |
2217 | ptrace_report_syscall_exit(regs, 0); |
2218 | regs->regs[regno] = saved_reg; |
2219 | } else { |
2220 | regs->regs[regno] = saved_reg; |
2221 | |
2222 | /* |
2223 | * Signal a pseudo-step exception since we are stepping but |
2224 | * tracer modifications to the registers may have rewound the |
2225 | * state machine. |
2226 | */ |
2227 | ptrace_report_syscall_exit(regs, 1); |
2228 | } |
2229 | } |
2230 | |
2231 | int syscall_trace_enter(struct pt_regs *regs) |
2232 | { |
2233 | unsigned long flags = read_thread_flags(); |
2234 | |
2235 | if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { |
2236 | report_syscall(regs, PTRACE_SYSCALL_ENTER); |
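/*
 * PTRACE_SYSEMU: the tracer emulates the syscall itself, so return
 * NO_SYSCALL to keep the kernel from invoking the real handler.
 */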
2237 | if (flags & _TIF_SYSCALL_EMU) |
2238 | return NO_SYSCALL; |
2239 | } |
2240 | |
2241 | /* Do the secure computing after ptrace; failures should be fast. */ |
2242 | if (secure_computing() == -1) |
2243 | return NO_SYSCALL; |
2244 | |
2245 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
2246 | trace_sys_enter(regs, regs->syscallno); |
2247 | |
2248 | audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1], |
2249 | regs->regs[2], regs->regs[3]); |
2250 | |
2251 | return regs->syscallno; |
2252 | } |
2253 | |
2254 | void syscall_trace_exit(struct pt_regs *regs) |
2255 | { |
2256 | unsigned long flags = read_thread_flags(); |
2257 | |
2258 | audit_syscall_exit(regs); |
2259 | |
2260 | if (flags & _TIF_SYSCALL_TRACEPOINT) |
2261 | trace_sys_exit(regs, syscall_get_return_value(current, regs)); |
2262 | |
2263 | if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) |
2264 | report_syscall(regs, PTRACE_SYSCALL_EXIT); |
2265 | |
2266 | rseq_syscall(regs); |
2267 | } |
2268 | |
2269 | /* |
2270 | * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. |
2271 | * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is |
2272 | * not described in ARM DDI 0487D.a. |
2273 | * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may |
2274 | * be allocated an EL0 meaning in future. |
2275 | * Userspace cannot use these until they have an architectural meaning. |
2276 | * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. |
2277 | * We also reserve IL for the kernel; SS is handled dynamically. |
2278 | */ |
2279 | #define SPSR_EL1_AARCH64_RES0_BITS \ |
2280 | (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \ |
2281 | GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5)) |
2282 | #define SPSR_EL1_AARCH32_RES0_BITS \ |
2283 | (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) |
2284 | |
2285 | static int valid_compat_regs(struct user_pt_regs *regs) |
2286 | { |
2287 | regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS; |
2288 | |
2289 | if (!system_supports_mixed_endian_el0()) { |
2290 | if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) |
2291 | regs->pstate |= PSR_AA32_E_BIT; |
2292 | else |
2293 | regs->pstate &= ~PSR_AA32_E_BIT; |
2294 | } |
2295 | |
2296 | if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) && |
2297 | (regs->pstate & PSR_AA32_A_BIT) == 0 && |
2298 | (regs->pstate & PSR_AA32_I_BIT) == 0 && |
2299 | (regs->pstate & PSR_AA32_F_BIT) == 0) { |
2300 | return 1; |
2301 | } |
2302 | |
2303 | /* |
2304 | * Force PSR to a valid 32-bit EL0t, preserving the same bits as |
2305 | * arch/arm. |
2306 | */ |
2307 | regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT | |
2308 | PSR_AA32_C_BIT | PSR_AA32_V_BIT | |
2309 | PSR_AA32_Q_BIT | PSR_AA32_IT_MASK | |
2310 | PSR_AA32_GE_MASK | PSR_AA32_E_BIT | |
2311 | PSR_AA32_T_BIT; |
2312 | regs->pstate |= PSR_MODE32_BIT; |
2313 | |
2314 | return 0; |
2315 | } |
2316 | |
2317 | static int valid_native_regs(struct user_pt_regs *regs) |
2318 | { |
2319 | regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS; |
2320 | |
2321 | if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) && |
2322 | (regs->pstate & PSR_D_BIT) == 0 && |
2323 | (regs->pstate & PSR_A_BIT) == 0 && |
2324 | (regs->pstate & PSR_I_BIT) == 0 && |
2325 | (regs->pstate & PSR_F_BIT) == 0) { |
2326 | return 1; |
2327 | } |
2328 | |
2329 | /* Force PSR to a valid 64-bit EL0t */ |
2330 | regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT; |
2331 | |
2332 | return 0; |
2333 | } |
2334 | |
2335 | /* |
2336 | * Are the current registers suitable for user mode? (used to maintain |
2337 | * security in signal handlers) |
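* Returns 1 when the state is acceptable as-is; otherwise the pstate is
* forced to a safe EL0 value and 0 is returned.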
2338 | */ |
2339 | int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) |
2340 | { |
2341 | /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ |
2342 | user_regs_reset_single_step(regs, task); |
2343 | |
2344 | if (is_compat_thread(task_thread_info(task))) |
2345 | return valid_compat_regs(regs); |
2346 | else |
2347 | return valid_native_regs(regs); |
2348 | } |
2349 | |