/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/runtime-const.h>

/*
 * Virtual variable: there's no actual backing store for this,
 * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
 */
extern unsigned long USER_PTR_MAX;

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]",
				X86_FEATURE_LAM)
		    : [addr] "+r" (addr)
		    : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));

	return addr;
}

#define untagged_addr(addr) ({						\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
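
/*
 * Illustrative only, not part of this header: a caller that receives a
 * possibly LAM-tagged pointer from userspace and needs the plain address,
 * e.g. for a VMA lookup, strips the tag bits first. 'uaddr' and the
 * surrounding code are made-up names, a sketch rather than real callers:
 *
 *	unsigned long addr = untagged_addr(uaddr);
 *
 *	// with current->mm's mmap lock held:
 *	vma = find_vma(current->mm, addr);
 */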

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr) ({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})
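
/*
 * Illustrative sketch only: when acting on another task's mm, the untag
 * mask must come from that mm and its mmap lock must be held, which is
 * what the mmap_assert_locked() above enforces. 'example_walk' and
 * 'uaddr' below are hypothetical names:
 *
 *	mmap_read_lock(mm);
 *	ret = example_walk(mm, untagged_addr_remote(mm, uaddr));
 *	mmap_read_unlock(mm);
 */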

#endif

#define valid_user_address(x) \
	likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))

/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */
static inline void __user *mask_user_address(const void __user *ptr)
{
	void __user *ret;
	asm("cmp %1,%0\n\t"
	    "cmova %1,%0"
		:"=r" (ret)
		:"r" (runtime_const_ptr(USER_PTR_MAX)),
		 "0" (ptr));
	return ret;
}
#define masked_user_access_begin(x) ({					\
	__auto_type __masked_ptr = (x);					\
	__masked_ptr = mask_user_address(__masked_ptr);			\
	__uaccess_begin(); __masked_ptr; })
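
/*
 * Illustrative calling pattern, a sketch rather than a definition from
 * this file: fast-path user reads typically pair this with an unsafe
 * accessor and user_access_end(), falling back to the conditional form
 * when address masking cannot be used:
 *
 *	if (can_do_masked_user_access())
 *		from = masked_user_access_begin(from);
 *	else if (!user_read_access_begin(from, sizeof(*from)))
 *		return -EFAULT;
 *	unsafe_get_user(val, from, Efault);
 *	user_access_end();
 */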

/*
 * User pointers can have tag bits on x86-64. This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user part of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that we always have at least one guard page between the
 * max user address and the non-canonical gap, allowing us to
 * ignore small sizes entirely.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr'.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (__force unsigned long)ptr;

		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
	}
}
#define __access_ok __access_ok
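
/*
 * Illustrative only: the generic access_ok() helper picks up the
 * __access_ok() above via the #define, so a typical range check before
 * starting a user access region looks like (names are made up):
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */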

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
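
/*
 * Illustrative only: these raw helpers are what the generic
 * copy_from_user() / copy_to_user() in <linux/uaccess.h> come down to
 * after their access_ok() and might_fault() checks, e.g. (the struct
 * and pointer names below are made up):
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 */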

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
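
/*
 * Illustrative only (made-up names): clear_user() returns the number of
 * bytes that could NOT be zeroed, so a caller padding the tail of a user
 * buffer typically does:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */
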
#endif /* _ASM_X86_UACCESS_64_H */