/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 *
 * Passing down mm_struct allows defining untagging rules on a per-process
 * basis.
 *
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr)	({	\
	mmap_assert_locked(mm);			\
	untagged_addr(addr);			\
})
#endif
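
/*
 * As an illustration only: a tagged-pointer architecture might implement
 * untagged_addr() roughly as below, clearing the tag bits held in the top
 * byte of the address. The mask and shift here are hypothetical, not any
 * particular architecture's ABI:
 *
 *	#define untagged_addr(addr) \
 *		((__force __typeof__(addr))((__force u64)(addr) & ~(0xffUL << 56)))
 *
 * so that a tagged pointer such as 0x0b00ffff80001000 and its untagged form
 * 0x0000ffff80001000 refer to the same memory.
 */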

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen. Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
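
/*
 * Purely as a sketch of the contract above (not how any real architecture
 * implements it): a byte-wise raw_copy_from_user() that obeys the rules -
 * no access_ok() check, no zero-padding, return value counts bytes left:
 *
 *	unsigned long raw_copy_from_user(void *to, const void __user *from,
 *					 unsigned long n)
 *	{
 *		char *d = to;
 *		const char __user *s = from;
 *
 *		while (n) {
 *			char c;
 *
 *			if (__get_user(c, s))	// fault: report bytes left
 *				break;
 *			*d++ = c;
 *			s++;
 *			n--;
 *		}
 *		return n;
 *	}
 */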

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user(to, from, n);
	return n;
}
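
/*
 * Typical usage of the checked copy helpers, e.g. in an ioctl handler
 * (struct foo_args and do_foo() are made up for illustration):
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	args.result = do_foo(&args);
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 *
 * Note that both return the number of bytes *not* copied, hence the
 * conventional "non-zero means -EFAULT" pattern.
 */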

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
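
/*
 * Callers that read from memory which may contain poison (e.g. pmem) use
 * copy_mc_to_kernel() and treat a short copy as data loss. A hedged sketch,
 * with hypothetical names:
 *
 *	unsigned long rem = copy_mc_to_kernel(buf, pmem_addr, len);
 *
 *	if (rem)
 *		return -EIO;	// len - rem bytes arrived before the #MC
 *
 * With the generic fallback above, a machine check during the source read
 * is fatal rather than recovered, so the return value is always 0.
 */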

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
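
/*
 * Illustrative pairing with the atomic user access primitives: with page
 * faults disabled, a failed access returns immediately instead of sleeping
 * in fault handling, so this pattern is usable from atomic context:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, size);
 *	pagefault_enable();
 *	if (ret)
 *		// fall back to a sleeping copy, or fail
 *
 * (Assumes access_ok() was checked beforehand, per the rules above.)
 */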

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst: Destination address, in kernel space. This buffer must be @ksize
 *	bytes long.
 * @ksize: Size of @dst struct.
 * @src: Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)		\
do {								\
	type __user *p = (type __force __user *)(src);		\
	type data;						\
	if (__get_user(data, p))				\
		goto label;					\
	*(type *)dst = data;					\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)		\
do {								\
	type __user *p = (type __force __user *)(dst);		\
	type data = *(type *)src;				\
	if (__put_user(data, p))				\
		goto label;					\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
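
/*
 * Example (illustrative): peeking at a possibly-bogus kernel pointer, as a
 * debugging dumper might, without risking an oops on a bad address:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		pr_info("%px: <fault>\n", addr);
 *	else
 *		pr_info("%px: %lx\n", addr, word);
 */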

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
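
/*
 * The unsafe_*() accessors amortize the cost of enabling/disabling user
 * access (e.g. SMAP/PAN toggling) over a batch of accesses. Canonical shape,
 * where uptr/val/field are placeholders:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, &uptr->field, Efault);
 *	unsafe_put_user(val + 1, &uptr->field, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 *
 * The label-based error handling is the required calling convention.
 */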

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */