1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Based on arch/arm/kernel/process.c |
4 | * |
5 | * Original Copyright (C) 1995 Linus Torvalds |
6 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. |
7 | * Copyright (C) 2012 ARM Ltd. |
8 | */ |
9 | #include <linux/compat.h> |
10 | #include <linux/efi.h> |
11 | #include <linux/elf.h> |
12 | #include <linux/export.h> |
13 | #include <linux/sched.h> |
14 | #include <linux/sched/debug.h> |
15 | #include <linux/sched/task.h> |
16 | #include <linux/sched/task_stack.h> |
17 | #include <linux/kernel.h> |
18 | #include <linux/mman.h> |
19 | #include <linux/mm.h> |
20 | #include <linux/nospec.h> |
21 | #include <linux/stddef.h> |
22 | #include <linux/sysctl.h> |
23 | #include <linux/unistd.h> |
24 | #include <linux/user.h> |
25 | #include <linux/delay.h> |
26 | #include <linux/reboot.h> |
27 | #include <linux/interrupt.h> |
28 | #include <linux/init.h> |
29 | #include <linux/cpu.h> |
30 | #include <linux/elfcore.h> |
31 | #include <linux/pm.h> |
32 | #include <linux/tick.h> |
33 | #include <linux/utsname.h> |
34 | #include <linux/uaccess.h> |
35 | #include <linux/random.h> |
36 | #include <linux/hw_breakpoint.h> |
37 | #include <linux/personality.h> |
38 | #include <linux/notifier.h> |
39 | #include <trace/events/power.h> |
40 | #include <linux/percpu.h> |
41 | #include <linux/thread_info.h> |
42 | #include <linux/prctl.h> |
43 | #include <linux/stacktrace.h> |
44 | |
45 | #include <asm/alternative.h> |
46 | #include <asm/compat.h> |
47 | #include <asm/cpufeature.h> |
48 | #include <asm/cacheflush.h> |
49 | #include <asm/exec.h> |
50 | #include <asm/fpsimd.h> |
51 | #include <asm/mmu_context.h> |
52 | #include <asm/mte.h> |
53 | #include <asm/processor.h> |
54 | #include <asm/pointer_auth.h> |
55 | #include <asm/stacktrace.h> |
56 | #include <asm/switch_to.h> |
57 | #include <asm/system_misc.h> |
58 | |
59 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) |
60 | #include <linux/stackprotector.h> |
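/*
 * Global stack canary, used when per-task canaries (STACKPROTECTOR_PER_TASK)
 * are not in use; compiler-instrumented functions compare their saved canary
 * against this value on return.
 */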
61 | unsigned long __stack_chk_guard __ro_after_init; |
62 | EXPORT_SYMBOL(__stack_chk_guard); |
63 | #endif |
64 | |
65 | /* |
66 | * Function pointers to optional machine specific functions |
67 | */ |
68 | void (*pm_power_off)(void); |
69 | EXPORT_SYMBOL_GPL(pm_power_off); |
70 | |
71 | #ifdef CONFIG_HOTPLUG_CPU |
72 | void __noreturn arch_cpu_idle_dead(void) |
73 | { |
74 | cpu_die(); |
75 | } |
76 | #endif |
77 | |
78 | /* |
79 | * Called by kexec, immediately prior to machine_kexec(). |
80 | * |
81 | * This must completely disable all secondary CPUs; simply causing those CPUs |
 * to execute e.g. a RAM-based spin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU spin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
86 | */ |
87 | void machine_shutdown(void) |
88 | { |
	smp_shutdown_nonboot_cpus(reboot_cpu);
90 | } |
91 | |
92 | /* |
93 | * Halting simply requires that the secondary CPUs stop performing any |
94 | * activity (executing tasks, handling interrupts). smp_send_stop() |
95 | * achieves this. |
96 | */ |
97 | void machine_halt(void) |
98 | { |
99 | local_irq_disable(); |
100 | smp_send_stop(); |
101 | while (1); |
102 | } |
103 | |
104 | /* |
105 | * Power-off simply requires that the secondary CPUs stop performing any |
106 | * activity (executing tasks, handling interrupts). smp_send_stop() |
107 | * achieves this. When the system power is turned off, it will take all CPUs |
108 | * with it. |
109 | */ |
110 | void machine_power_off(void) |
111 | { |
112 | local_irq_disable(); |
113 | smp_send_stop(); |
114 | do_kernel_power_off(); |
115 | } |
116 | |
117 | /* |
118 | * Restart requires that the secondary CPUs stop performing any activity |
119 | * while the primary CPU resets the system. Systems with multiple CPUs must |
120 | * provide a HW restart implementation, to ensure that all CPUs reset at once. |
121 | * This is required so that any code running after reset on the primary CPU |
122 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still |
123 | * executing pre-reset code, and using RAM that the primary CPU's code wishes |
124 | * to use. Implementing such co-ordination would be essentially impossible. |
125 | */ |
126 | void machine_restart(char *cmd) |
127 | { |
128 | /* Disable interrupts first */ |
129 | local_irq_disable(); |
130 | smp_send_stop(); |
131 | |
132 | /* |
133 | * UpdateCapsule() depends on the system being reset via |
134 | * ResetSystem(). |
135 | */ |
136 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
137 | efi_reboot(reboot_mode, NULL); |
138 | |
139 | /* Now call the architecture specific reboot code. */ |
140 | do_kernel_restart(cmd); |
141 | |
142 | /* |
143 | * Whoops - the architecture was unable to reboot. |
144 | */ |
	printk("Reboot failed -- System halted\n");
146 | while (1); |
147 | } |
148 | |
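/*
 * Map each PSTATE.BTYPE value to a two-character mnemonic for use by
 * print_pstate() below.
 */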
149 | #define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str |
150 | static const char *const btypes[] = { |
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
155 | }; |
156 | #undef bstr |
157 | |
158 | static void print_pstate(struct pt_regs *regs) |
159 | { |
160 | u64 pstate = regs->pstate; |
161 | |
162 | if (compat_user_mode(regs)) { |
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
164 | pstate, |
165 | pstate & PSR_AA32_N_BIT ? 'N' : 'n', |
166 | pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', |
167 | pstate & PSR_AA32_C_BIT ? 'C' : 'c', |
168 | pstate & PSR_AA32_V_BIT ? 'V' : 'v', |
169 | pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', |
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
172 | pstate & PSR_AA32_A_BIT ? 'A' : 'a', |
173 | pstate & PSR_AA32_I_BIT ? 'I' : 'i', |
174 | pstate & PSR_AA32_F_BIT ? 'F' : 'f', |
175 | pstate & PSR_AA32_DIT_BIT ? '+' : '-', |
176 | pstate & PSR_AA32_SSBS_BIT ? '+' : '-'); |
177 | } else { |
178 | const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> |
179 | PSR_BTYPE_SHIFT]; |
180 | |
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
182 | pstate, |
183 | pstate & PSR_N_BIT ? 'N' : 'n', |
184 | pstate & PSR_Z_BIT ? 'Z' : 'z', |
185 | pstate & PSR_C_BIT ? 'C' : 'c', |
186 | pstate & PSR_V_BIT ? 'V' : 'v', |
187 | pstate & PSR_D_BIT ? 'D' : 'd', |
188 | pstate & PSR_A_BIT ? 'A' : 'a', |
189 | pstate & PSR_I_BIT ? 'I' : 'i', |
190 | pstate & PSR_F_BIT ? 'F' : 'f', |
191 | pstate & PSR_PAN_BIT ? '+' : '-', |
192 | pstate & PSR_UAO_BIT ? '+' : '-', |
193 | pstate & PSR_TCO_BIT ? '+' : '-', |
194 | pstate & PSR_DIT_BIT ? '+' : '-', |
195 | pstate & PSR_SSBS_BIT ? '+' : '-', |
196 | btype_str); |
197 | } |
198 | } |
199 | |
200 | void __show_regs(struct pt_regs *regs) |
201 | { |
202 | int i, top_reg; |
203 | u64 lr, sp; |
204 | |
205 | if (compat_user_mode(regs)) { |
206 | lr = regs->compat_lr; |
207 | sp = regs->compat_sp; |
208 | top_reg = 12; |
209 | } else { |
210 | lr = regs->regs[30]; |
211 | sp = regs->sp; |
212 | top_reg = 29; |
213 | } |
214 | |
215 | show_regs_print_info(KERN_DEFAULT); |
216 | print_pstate(regs); |
217 | |
218 | if (!user_mode(regs)) { |
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);
227 | |
228 | if (system_uses_irq_prio_masking()) |
		printk("pmr_save: %08llx\n", regs->pmr_save);
230 | |
231 | i = top_reg; |
232 | |
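	/*
	 * Dump the general purpose registers three per line, from x<top_reg>
	 * down to x0.
	 */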
233 | while (i >= 0) { |
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
240 | } |
241 | } |
242 | |
243 | void show_regs(struct pt_regs *regs) |
244 | { |
245 | __show_regs(regs); |
246 | dump_backtrace(regs, NULL, KERN_DEFAULT); |
247 | } |
248 | |
249 | static void tls_thread_flush(void) |
250 | { |
251 | write_sysreg(0, tpidr_el0); |
252 | if (system_supports_tpidr2()) |
253 | write_sysreg_s(0, SYS_TPIDR2_EL0); |
254 | |
255 | if (is_compat_task()) { |
256 | current->thread.uw.tp_value = 0; |
257 | |
258 | /* |
259 | * We need to ensure ordering between the shadow state and the |
260 | * hardware state, so that we don't corrupt the hardware state |
261 | * with a stale shadow state during context switch. |
262 | */ |
263 | barrier(); |
264 | write_sysreg(0, tpidrro_el0); |
265 | } |
266 | } |
267 | |
268 | static void flush_tagged_addr_state(void) |
269 | { |
270 | if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) |
271 | clear_thread_flag(TIF_TAGGED_ADDR); |
272 | } |
273 | |
274 | void flush_thread(void) |
275 | { |
276 | fpsimd_flush_thread(); |
277 | tls_thread_flush(); |
278 | flush_ptrace_hw_breakpoint(current); |
279 | flush_tagged_addr_state(); |
280 | } |
281 | |
282 | void arch_release_task_struct(struct task_struct *tsk) |
283 | { |
284 | fpsimd_release_task(tsk); |
285 | } |
286 | |
287 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
288 | { |
289 | if (current->mm) |
290 | fpsimd_preserve_current_state(); |
291 | *dst = *src; |
292 | |
293 | /* |
294 | * Detach src's sve_state (if any) from dst so that it does not |
295 | * get erroneously used or freed prematurely. dst's copies |
296 | * will be allocated on demand later on if dst uses SVE. |
297 | * For consistency, also clear TIF_SVE here: this could be done |
298 | * later in copy_process(), but to avoid tripping up future |
299 | * maintainers it is best not to leave TIF flags and buffers in |
300 | * an inconsistent state, even temporarily. |
301 | */ |
302 | dst->thread.sve_state = NULL; |
303 | clear_tsk_thread_flag(dst, TIF_SVE); |
304 | |
305 | /* |
306 | * In the unlikely event that we create a new thread with ZA |
307 | * enabled we should retain the ZA and ZT state so duplicate |
 * it here. This may shortly be freed if we exec() or if CLONE_SETTLS
 * is used, but it's simpler to do it here. To avoid
310 | * confusing the rest of the code ensure that we have a |
311 | * sve_state allocated whenever sme_state is allocated. |
312 | */ |
313 | if (thread_za_enabled(&src->thread)) { |
		dst->thread.sve_state = kzalloc(sve_state_size(src),
						GFP_KERNEL);
316 | if (!dst->thread.sve_state) |
317 | return -ENOMEM; |
318 | |
319 | dst->thread.sme_state = kmemdup(src->thread.sme_state, |
320 | sme_state_size(src), |
321 | GFP_KERNEL); |
322 | if (!dst->thread.sme_state) { |
			kfree(dst->thread.sve_state);
324 | dst->thread.sve_state = NULL; |
325 | return -ENOMEM; |
326 | } |
327 | } else { |
328 | dst->thread.sme_state = NULL; |
329 | clear_tsk_thread_flag(dst, TIF_SME); |
330 | } |
331 | |
332 | dst->thread.fp_type = FP_STATE_FPSIMD; |
333 | |
334 | /* clear any pending asynchronous tag fault raised by the parent */ |
335 | clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); |
336 | |
337 | return 0; |
338 | } |
339 | |
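/*
 * ret_from_fork() is implemented in assembly. For a user thread it returns to
 * userspace via the child's pt_regs set up below; for a kernel thread it
 * first calls the function stashed in cpu_context.x19 with the argument in
 * x20 (see copy_thread()).
 */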
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
341 | |
342 | int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) |
343 | { |
344 | unsigned long clone_flags = args->flags; |
345 | unsigned long stack_start = args->stack; |
346 | unsigned long tls = args->tls; |
347 | struct pt_regs *childregs = task_pt_regs(p); |
348 | |
349 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); |
350 | |
351 | /* |
352 | * In case p was allocated the same task_struct pointer as some |
353 | * other recently-exited task, make sure p is disassociated from |
354 | * any cpu that may have run that now-exited task recently. |
355 | * Otherwise we could erroneously skip reloading the FPSIMD |
356 | * registers for p. |
357 | */ |
358 | fpsimd_flush_task_state(p); |
359 | |
360 | ptrauth_thread_init_kernel(p); |
361 | |
362 | if (likely(!args->fn)) { |
363 | *childregs = *current_pt_regs(); |
364 | childregs->regs[0] = 0; |
365 | |
366 | /* |
367 | * Read the current TLS pointer from tpidr_el0 as it may be |
368 | * out-of-sync with the saved value. |
369 | */ |
370 | *task_user_tls(p) = read_sysreg(tpidr_el0); |
371 | if (system_supports_tpidr2()) |
372 | p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); |
373 | |
374 | if (stack_start) { |
375 | if (is_compat_thread(task_thread_info(p))) |
376 | childregs->compat_sp = stack_start; |
377 | else |
378 | childregs->sp = stack_start; |
379 | } |
380 | |
381 | /* |
382 | * If a TLS pointer was passed to clone, use it for the new |
383 | * thread. We also reset TPIDR2 if it's in use. |
384 | */ |
385 | if (clone_flags & CLONE_SETTLS) { |
386 | p->thread.uw.tp_value = tls; |
387 | p->thread.tpidr2_el0 = 0; |
388 | } |
389 | } else { |
390 | /* |
391 | * A kthread has no context to ERET to, so ensure any buggy |
392 | * ERET is treated as an illegal exception return. |
393 | * |
394 | * When a user task is created from a kthread, childregs will |
395 | * be initialized by start_thread() or start_compat_thread(). |
396 | */ |
397 | memset(childregs, 0, sizeof(struct pt_regs)); |
398 | childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; |
399 | |
400 | p->thread.cpu_context.x19 = (unsigned long)args->fn; |
401 | p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; |
402 | } |
403 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; |
404 | p->thread.cpu_context.sp = (unsigned long)childregs; |
405 | /* |
406 | * For the benefit of the unwinder, set up childregs->stackframe |
407 | * as the final frame for the new task. |
408 | */ |
409 | p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; |
410 | |
411 | ptrace_hw_copy_thread(p); |
412 | |
413 | return 0; |
414 | } |
415 | |
416 | void tls_preserve_current_state(void) |
417 | { |
418 | *task_user_tls(current) = read_sysreg(tpidr_el0); |
419 | if (system_supports_tpidr2() && !is_compat_task()) |
420 | current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); |
421 | } |
422 | |
423 | static void tls_thread_switch(struct task_struct *next) |
424 | { |
425 | tls_preserve_current_state(); |
426 | |
427 | if (is_compat_thread(task_thread_info(next))) |
428 | write_sysreg(next->thread.uw.tp_value, tpidrro_el0); |
429 | else if (!arm64_kernel_unmapped_at_el0()) |
430 | write_sysreg(0, tpidrro_el0); |
431 | |
432 | write_sysreg(*task_user_tls(next), tpidr_el0); |
433 | if (system_supports_tpidr2()) |
434 | write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); |
435 | } |
436 | |
437 | /* |
438 | * Force SSBS state on context-switch, since it may be lost after migrating |
439 | * from a CPU which treats the bit as RES0 in a heterogeneous system. |
440 | */ |
441 | static void ssbs_thread_switch(struct task_struct *next) |
442 | { |
443 | /* |
	 * Nothing to do for kernel threads, whose saved pt_regs may be junk
	 * (e.g. the idle task), so check the flags and bail early.
446 | */ |
447 | if (unlikely(next->flags & PF_KTHREAD)) |
448 | return; |
449 | |
450 | /* |
451 | * If all CPUs implement the SSBS extension, then we just need to |
452 | * context-switch the PSTATE field. |
453 | */ |
454 | if (alternative_has_cap_unlikely(ARM64_SSBS)) |
455 | return; |
456 | |
457 | spectre_v4_enable_task_mitigation(next); |
458 | } |
459 | |
460 | /* |
461 | * We store our current task in sp_el0, which is clobbered by userspace. Keep a |
462 | * shadow copy so that we can restore this upon entry from userspace. |
463 | * |
464 | * This is *only* for exception entry from EL0, and is not valid until we |
465 | * __switch_to() a user task. |
466 | */ |
467 | DEFINE_PER_CPU(struct task_struct *, __entry_task); |
468 | |
469 | static void entry_task_switch(struct task_struct *next) |
470 | { |
471 | __this_cpu_write(__entry_task, next); |
472 | } |
473 | |
474 | /* |
475 | * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. |
476 | * Ensure access is disabled when switching to a 32bit task, ensure |
477 | * access is enabled when switching to a 64bit task. |
478 | */ |
479 | static void erratum_1418040_thread_switch(struct task_struct *next) |
480 | { |
481 | if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || |
482 | !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) |
483 | return; |
484 | |
485 | if (is_compat_thread(task_thread_info(next))) |
486 | sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); |
487 | else |
488 | sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); |
489 | } |
490 | |
491 | static void erratum_1418040_new_exec(void) |
492 | { |
493 | preempt_disable(); |
494 | erratum_1418040_thread_switch(current); |
495 | preempt_enable(); |
496 | } |
497 | |
498 | /* |
499 | * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore |
500 | * this function must be called with preemption disabled and the update to |
501 | * sctlr_user must be made in the same preemption disabled block so that |
502 | * __switch_to() does not see the variable update before the SCTLR_EL1 one. |
503 | */ |
504 | void update_sctlr_el1(u64 sctlr) |
505 | { |
506 | /* |
507 | * EnIA must not be cleared while in the kernel as this is necessary for |
508 | * in-kernel PAC. It will be cleared on kernel exit if needed. |
509 | */ |
510 | sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr); |
511 | |
512 | /* ISB required for the kernel uaccess routines when setting TCF0. */ |
513 | isb(); |
514 | } |
515 | |
516 | /* |
517 | * Thread switching. |
518 | */ |
519 | __notrace_funcgraph __sched |
520 | struct task_struct *__switch_to(struct task_struct *prev, |
521 | struct task_struct *next) |
522 | { |
523 | struct task_struct *last; |
524 | |
525 | fpsimd_thread_switch(next); |
526 | tls_thread_switch(next); |
527 | hw_breakpoint_thread_switch(next); |
528 | contextidr_thread_switch(next); |
529 | entry_task_switch(next); |
530 | ssbs_thread_switch(next); |
531 | erratum_1418040_thread_switch(next); |
532 | ptrauth_thread_switch_user(next); |
533 | |
534 | /* |
535 | * Complete any pending TLB or cache maintenance on this CPU in case |
536 | * the thread migrates to a different CPU. |
537 | * This full barrier is also required by the membarrier system |
538 | * call. |
539 | */ |
540 | dsb(ish); |
541 | |
542 | /* |
543 | * MTE thread switching must happen after the DSB above to ensure that |
544 | * any asynchronous tag check faults have been logged in the TFSR*_EL1 |
545 | * registers. |
546 | */ |
547 | mte_thread_switch(next); |
548 | /* avoid expensive SCTLR_EL1 accesses if no change */ |
549 | if (prev->thread.sctlr_user != next->thread.sctlr_user) |
		update_sctlr_el1(next->thread.sctlr_user);
551 | |
552 | /* the actual thread switch */ |
553 | last = cpu_switch_to(prev, next); |
554 | |
555 | return last; |
556 | } |
557 | |
558 | struct wchan_info { |
559 | unsigned long pc; |
560 | int count; |
561 | }; |
562 | |
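/*
 * arch_stack_walk() callback: record the first PC that is not in a scheduler
 * function and stop the walk, giving up after 16 scheduler frames.
 */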
563 | static bool get_wchan_cb(void *arg, unsigned long pc) |
564 | { |
565 | struct wchan_info *wchan_info = arg; |
566 | |
	if (!in_sched_functions(pc)) {
568 | wchan_info->pc = pc; |
569 | return false; |
570 | } |
571 | return wchan_info->count++ < 16; |
572 | } |
573 | |
574 | unsigned long __get_wchan(struct task_struct *p) |
575 | { |
576 | struct wchan_info wchan_info = { |
577 | .pc = 0, |
578 | .count = 0, |
579 | }; |
580 | |
	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);
587 | |
588 | return wchan_info.pc; |
589 | } |
590 | |
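/*
 * Randomly offset the initial stack pointer by up to a page (unless address
 * space randomization is disabled for the task) while keeping it 16-byte
 * aligned as AAPCS64 requires.
 */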
591 | unsigned long arch_align_stack(unsigned long sp) |
592 | { |
593 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
594 | sp -= get_random_u32_below(PAGE_SIZE); |
595 | return sp & ~0xf; |
596 | } |
597 | |
598 | #ifdef CONFIG_COMPAT |
599 | int compat_elf_check_arch(const struct elf32_hdr *hdr) |
600 | { |
601 | if (!system_supports_32bit_el0()) |
602 | return false; |
603 | |
604 | if ((hdr)->e_machine != EM_ARM) |
605 | return false; |
606 | |
607 | if (!((hdr)->e_flags & EF_ARM_EABI_MASK)) |
608 | return false; |
609 | |
610 | /* |
611 | * Prevent execve() of a 32-bit program from a deadline task |
612 | * if the restricted affinity mask would be inadmissible on an |
613 | * asymmetric system. |
614 | */ |
615 | return !static_branch_unlikely(&arm64_mismatched_32bit_el0) || |
616 | !dl_task_check_affinity(current, system_32bit_el0_cpumask()); |
617 | } |
618 | #endif |
619 | |
620 | /* |
621 | * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY. |
622 | */ |
623 | void arch_setup_new_exec(void) |
624 | { |
625 | unsigned long mmflags = 0; |
626 | |
627 | if (is_compat_task()) { |
628 | mmflags = MMCF_AARCH32; |
629 | |
630 | /* |
631 | * Restrict the CPU affinity mask for a 32-bit task so that |
632 | * it contains only 32-bit-capable CPUs. |
633 | * |
634 | * From the perspective of the task, this looks similar to |
635 | * what would happen if the 64-bit-only CPUs were hot-unplugged |
636 | * at the point of execve(), although we try a bit harder to |
637 | * honour the cpuset hierarchy. |
638 | */ |
639 | if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
640 | force_compatible_cpus_allowed_ptr(current); |
641 | } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { |
642 | relax_compatible_cpus_allowed_ptr(current); |
643 | } |
644 | |
645 | current->mm->context.flags = mmflags; |
646 | ptrauth_thread_init_user(); |
647 | mte_thread_init_user(); |
648 | erratum_1418040_new_exec(); |
649 | |
650 | if (task_spec_ssb_noexec(current)) { |
651 | arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, |
652 | PR_SPEC_ENABLE); |
653 | } |
654 | } |
655 | |
656 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI |
657 | /* |
658 | * Control the relaxed ABI allowing tagged user addresses into the kernel. |
659 | */ |
660 | static unsigned int tagged_addr_disabled; |
661 | |
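/*
 * Illustrative example (userspace, not part of this file): a task opts in to
 * the tagged address ABI via prctl(2), after which the kernel ignores the top
 * byte of user pointers passed in syscalls:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 */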
662 | long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) |
663 | { |
664 | unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE; |
665 | struct thread_info *ti = task_thread_info(task); |
666 | |
667 | if (is_compat_thread(ti)) |
668 | return -EINVAL; |
669 | |
670 | if (system_supports_mte()) |
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			      PR_MTE_TAG_MASK;
673 | |
674 | if (arg & ~valid_mask) |
675 | return -EINVAL; |
676 | |
677 | /* |
678 | * Do not allow the enabling of the tagged address ABI if globally |
679 | * disabled via sysctl abi.tagged_addr_disabled. |
680 | */ |
681 | if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) |
682 | return -EINVAL; |
683 | |
684 | if (set_mte_ctrl(task, arg) != 0) |
685 | return -EINVAL; |
686 | |
687 | update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); |
688 | |
689 | return 0; |
690 | } |
691 | |
692 | long get_tagged_addr_ctrl(struct task_struct *task) |
693 | { |
694 | long ret = 0; |
695 | struct thread_info *ti = task_thread_info(task); |
696 | |
697 | if (is_compat_thread(ti)) |
698 | return -EINVAL; |
699 | |
700 | if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) |
701 | ret = PR_TAGGED_ADDR_ENABLE; |
702 | |
703 | ret |= get_mte_ctrl(task); |
704 | |
705 | return ret; |
706 | } |
707 | |
708 | /* |
709 | * Global sysctl to disable the tagged user addresses support. This control |
710 | * only prevents the tagged address ABI enabling via prctl() and does not |
711 | * disable it for tasks that already opted in to the relaxed ABI. |
712 | */ |
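/*
 * For example (illustrative): # echo 1 > /proc/sys/abi/tagged_addr_disabled
 * prevents further tasks from opting in via prctl().
 */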
713 | |
714 | static struct ctl_table tagged_addr_sysctl_table[] = { |
715 | { |
		.procname = "tagged_addr_disabled",
717 | .mode = 0644, |
718 | .data = &tagged_addr_disabled, |
719 | .maxlen = sizeof(int), |
720 | .proc_handler = proc_dointvec_minmax, |
721 | .extra1 = SYSCTL_ZERO, |
722 | .extra2 = SYSCTL_ONE, |
723 | }, |
724 | }; |
725 | |
726 | static int __init tagged_addr_init(void) |
727 | { |
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
729 | return -EINVAL; |
730 | return 0; |
731 | } |
732 | |
733 | core_initcall(tagged_addr_init); |
734 | #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ |
735 | |
736 | #ifdef CONFIG_BINFMT_ELF |
737 | int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, |
738 | bool has_interp, bool is_interp) |
739 | { |
740 | /* |
741 | * For dynamically linked executables the interpreter is |
742 | * responsible for setting PROT_BTI on everything except |
743 | * itself. |
744 | */ |
745 | if (is_interp != has_interp) |
746 | return prot; |
747 | |
748 | if (!(state->flags & ARM64_ELF_BTI)) |
749 | return prot; |
750 | |
751 | if (prot & PROT_EXEC) |
752 | prot |= PROT_BTI; |
753 | |
754 | return prot; |
755 | } |
756 | #endif |
757 | |