// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/sys_arm.c
 *
 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
 * Copyright (C) 1995, 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/cpufeature.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
	long ret;

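	/*
	 * Walk the region one page at a time so that fatal signals and
	 * reschedule requests are honoured between chunks instead of
	 * stalling for the duration of one large flush.
	 */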
	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
			/*
			 * The workaround requires an inner-shareable tlbi.
			 * We pick the reserved-ASID to minimise the impact.
			 */
			__tlbi(aside1is, __TLBI_VADDR(0, 0));
			dsb(ish);
		}

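		/*
		 * Clean the D-cache and invalidate the I-cache for this
		 * chunk to the Point of Unification; a fault on the user
		 * range is reported back as an error.
		 */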
		ret = caches_clean_inval_user_pou(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}

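/*
 * Validate the user-supplied arguments before walking the range:
 * 'flags' must currently be zero, the range must not be inverted,
 * and the region must lie within the task's user address space.
 */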
static inline long
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok((const void __user *)start, end - start))
		return -EFAULT;

	return __do_compat_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 */
long compat_arm_syscall(struct pt_regs *regs, int scno)
{
	unsigned long addr;

	switch (scno) {
	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_. There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags. It should ALWAYS be passed as ZERO until it
	 * is defined to be something else. For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache. Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case __ARM_NR_compat_cacheflush:
		return do_compat_cache_op(regs->regs[0], regs->regs[1],
					  regs->regs[2]);
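
	/*
	 * Illustrative sketch only, not part of this file: a 32-bit EL0
	 * caller would typically reach this path with an EABI sequence
	 * along these lines, where 0x0f0002 is the numeric value of
	 * __ARM_NR_compat_cacheflush:
	 *
	 *	register unsigned long r0 asm("r0") = start;
	 *	register unsigned long r1 asm("r1") = end;
	 *	register unsigned long r2 asm("r2") = 0;
	 *	register unsigned long r7 asm("r7") = 0x0f0002;
	 *	asm volatile("svc #0"
	 *		     : "+r" (r0)
	 *		     : "r" (r1), "r" (r2), "r" (r7)
	 *		     : "memory");
	 */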

	case __ARM_NR_compat_set_tls:
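		/*
		 * AArch32 EL0 reads its TLS pointer from the read-only
		 * CP15 TPIDRURO register, which is architecturally mapped
		 * to TPIDRRO_EL0, so update both the saved thread value
		 * and the live register.
		 */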
		current->thread.uw.tp_value = regs->regs[0];

		/*
		 * Protect against register corruption from context switch.
		 * See comment in tls_thread_flush.
		 */
		barrier();
		write_sysreg(regs->regs[0], tpidrro_el0);
		return 0;

	default:
		/*
		 * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
		 * if not implemented, rather than raising SIGILL. This
		 * way the calling program can gracefully determine whether
		 * a feature is supported.
		 */
		if (scno < __ARM_NR_COMPAT_END)
			return -ENOSYS;
		break;
	}

	addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);

	arm64_notify_die("Oops - bad compat syscall(2)", regs,
			 SIGILL, ILL_ILLTRP, addr, 0);
	return 0;
}